source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_unop__minv_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_int32_int32)
// op(A') function: GB (_unop_tran__minv_int32_int32)
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 32)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 32) ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 32) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__minv_int32_int32)
(
int32_t *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [k] = minv (Ax [k]) for all k, skipping entries not present in the
// bitmap (when Ab is non-NULL).
int64_t k ;
if (Ab != NULL)
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (Ab [k])
{
int32_t zk = Ax [k] ;
Cx [k] = GB_IMINV_SIGNED (zk, 32) ;
}
}
}
else
{
// A is sparse, hypersparse, or full: every position holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
int32_t zk = Ax [k] ;
Cx [k] = GB_IMINV_SIGNED (zk, 32) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A'): the transpose loop itself lives in GB_unop_transpose.c,
// which uses the GB_* macros defined above for this type/operator pair.
GrB_Info GB (_unop_tran__minv_int32_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__div_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_int8)
// A*D function (colscale): GB (_AxD__div_int8)
// D*A function (rowscale): GB (_DxB__div_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__div_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__div_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_int8)
// C=scalar+B GB (_bind1st__div_int8)
// C=scalar+B' GB (_bind1st_tran__div_int8)
// C=A+scalar GB (_bind2nd__div_int8)
// C=A'+scalar GB (_bind2nd_tran__div_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (x, y, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT8 || GxB_NO_DIV_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; loop body comes from the
// shared template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (no accumulation) where all three matrices are dense.
void GB (_Cdense_ewise3_noaccum__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, with the
// work pre-sliced into B_ntasks tasks (B_ek_slicing) over B_nthreads threads.
GrB_Info GB (_Cdense_accumB__div_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller uses the generic method
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as an untyped pointer) into a dense
// matrix C using nthreads threads.  Returns GrB_NO_VALUE when this
// operator/type pairing is disabled at compile time.
GrB_Info GB (_Cdense_accumb__div_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// note: the duplicate, unreachable "return (GrB_SUCCESS) ;" that followed
// this block has been removed; the inner return above covers this path.
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes its results directly into C->x
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__div_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes its results directly into C->x
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B with optional mask M (possibly
// complemented).  For eWiseUnion, alpha/beta scalars replace missing
// entries of A and B respectively.
GrB_Info GB (_AaddB__div_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, allocated on demand by the template and
// released below by GB_FREE_WORKSPACE
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
// unbox the typed scalars only when they are actually used
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is sparse or
// hypersparse; all looping lives in the shared meta-template.
GrB_Info GB (_AemultB_08__div_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_FLIPPED selects fmult(y,x) vs fmult(x,y) at compile time.
GrB_Info GB (_AemultB_02__div_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; work is sliced over the entries of M.
GrB_Info GB (_AemultB_04__div_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult when the result C is held as a bitmap matrix.
GrB_Info GB (_AemultB_bitmap__div_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__div_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [k] = div (x, Bx [k]) with the scalar x bound as the first argument
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int8_t *Cx = (int8_t *) Cx_output ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// skip positions with no entry (bitmap case only)
if (GBB (Bb, k))
{
int8_t yk = GBX (Bx, k, false) ;
Cx [k] = GB_IDIV_SIGNED (x, yk, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__div_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [k] = div (Ax [k], y) with the scalar y bound as the second argument
int8_t *Ax = (int8_t *) Ax_input ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t y = (*((int8_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip positions with no entry (bitmap case only)
if (GBB (Ab, k))
{
int8_t xk = GBX (Ax, k, false) ;
Cx [k] = GB_IDIV_SIGNED (xk, y, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (x, aij, 8) ; \
}
// C = op (x, A'): transpose A and apply div with x bound first; the loop is
// in GB_unop_transpose.c, driven by the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__div_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, y, 8) ; \
}
// C = op (A', y): transpose A and apply div with y bound second; uses the
// GB_CAST_OP macro redefined immediately above this function.
GrB_Info GB (_bind2nd_tran__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
nonneg.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "../admm.h"
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/**
* @brief The proximal update for a non-negative factorization. This routine
* projects 'primal' onto the non-negative orthant. Simply, it zeroes
* out negative entries.
*
* @param[out] primal The row-major matrix to update.
* @param nrows The number of rows in primal.
* @param ncols The number of columns in primal.
* @param offset Not used.
* @param data Not used.
* @param rho Not used.
* @param should_parallelize If true, parallelize.
*/
void splatt_nonneg_prox(
    val_t * primal,
    idx_t const nrows,
    idx_t const ncols,
    idx_t const offset,
    void * data,
    val_t const rho,
    bool const should_parallelize)
{
  #pragma omp parallel for if(should_parallelize)
  for(idx_t row=0; row < nrows; ++row) {
    /* walk one row of the row-major matrix at a time */
    val_t * const row_vals = primal + (row * ncols);
    for(idx_t col=0; col < ncols; ++col) {
      /* project onto the non-negative orthant: clamp negatives to zero */
      row_vals[col] = (row_vals[col] > 0.) ? row_vals[col] : 0.;
    }
  }
}
/******************************************************************************
* API FUNCTIONS
*****************************************************************************/
// Register a non-negativity constraint on each listed mode of the CPD
// factorization; each mode gets its own constraint object.
splatt_error_type splatt_register_nonneg(
    splatt_cpd_opts * opts,
    splatt_idx_t const * const modes_included,
    splatt_idx_t const num_modes)
{
  for(idx_t m = 0; m < num_modes; ++m) {
    idx_t const mode = modes_included[m];
    splatt_cpd_constraint * ntf_con = splatt_alloc_constraint(SPLATT_CON_ADMM);

    /* only fill the details that are used */
    ntf_con->prox_func = splatt_nonneg_prox;

    /* set hints to assist optimizations */
    ntf_con->hints.row_separable = true;
    ntf_con->hints.sparsity_inducing = true;

    /* NOTE(review): sprintf assumes 'description' is large enough for this
     * fixed label -- confirm against the declared buffer size */
    sprintf(ntf_con->description, "NON-NEGATIVE");

    /* add to the CPD factorization */
    splatt_register_constraint(opts, mode, ntf_con);

    /* memory will be freed by splatt_free_constraint() */
  }
  return SPLATT_SUCCESS;
}
|
rhs.h | #include <math.h>
#include <stdio.h>
#include <iostream>
#include <omp.h>
#define REAL double
void calc_aux_cy(REAL *q , int qSize,
REAL *xq0 , int xq0Size,
REAL *xq1 , int xq1Size,
REAL *xq2 , int xq2Size,
REAL *xi , int xiSize,
REAL *yi , int yiSize,
REAL *zi , int ziSize,
REAL *normal0 , int normal0Size,
REAL *normal1 , int normal1Size,
REAL *normal2 , int normal2Size,
int stype,
REAL *aux , int auxSize,
REAL E);
// Fallback definition so this block also compiles standalone; in this header
// REAL is already defined as double above, making this a no-op.
#ifndef REAL
#define REAL double
#endif
/*
 * Accumulate into aux[j] the boundary-integral contribution of every point
 * charge q[i] at each collocation point (xi[j], yi[j], zi[j]).
 *
 *   stype == 1 : aux[j] += -q[i] * (r_vec . normal_j) / |r|^3
 *   otherwise  : aux[j] +=  q[i] / (E * |r|)
 *
 * where r_vec = collocation point j minus charge location i.  aux is
 * accumulated in place (caller owns and pre-initializes it).  The *Size
 * parameters describe the array lengths of the preceding pointer arguments.
 */
void calc_aux_cy(REAL *q , int qSize,
                 REAL *xq0 , int xq0Size,
                 REAL *xq1 , int xq1Size,
                 REAL *xq2 , int xq2Size,
                 REAL *xi , int xiSize,
                 REAL *yi , int yiSize,
                 REAL *zi , int ziSize,
                 REAL *normal0 , int normal0Size,
                 REAL *normal1 , int normal1Size,
                 REAL *normal2 , int normal2Size,
                 int stype,
                 REAL *aux , int auxSize,
                 REAL E)
{
    #pragma omp parallel default(none) shared(qSize, xiSize, xi, yi, zi, xq0, xq1, xq2, auxSize, aux, q, normal0, normal1, normal2, E, stype)
    {
        #pragma omp for nowait
        for (int j = 0; j < xiSize; j++)
        {
            for (int i = 0; i < qSize; i++)
            {
                // displacement from charge i to collocation point j, and its
                // length; hoisted so sqrt is evaluated once per (i, j) pair
                // instead of being re-expanded many times in one expression
                const REAL dx = xi[j] - xq0[i];
                const REAL dy = yi[j] - xq1[i];
                const REAL dz = zi[j] - xq2[i];
                const REAL r  = sqrt(dx * dx + dy * dy + dz * dz);

                REAL auxiliar;
                if (stype == 1)
                {
                    // dipole-like term: -q * (r_vec . n_j) / r^3
                    auxiliar = - ( q[i] / (r * r * r)
                                 * (dx * normal0[j] + dy * normal1[j] + dz * normal2[j]) );
                }
                else
                {
                    // monopole-like term scaled by permittivity E
                    auxiliar = q[i] / (E * r);
                }
                aux[j] += auxiliar;
            }
        }
    }
}
|
SybasePROP_fmt_plug.c | /* SybasePROP cracker. Hacked together during November of 2013 by Dhiru Kholia
* <dhiru [at] openwall.com>.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
* Frank Benhamou, Gregory Terrien and Marcel Major and it is hereby released
* to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* All credits for reversing this algorithm go to Marcel Major, Frank Benhamou
* and Gregory Terrien. Dhiru Kholia just glued together the bits (as usual!).
*
* [1] http://www.nes.fr/securitylab/?p=1128 (in French!)
*
* [2] https://hacktivity.com/hu/letoltesek/archivum/57/
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sybaseprop;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sybaseprop);
#else
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "syb-prop_repro.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 16
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define BLOCK_SIZE 8
#define FORMAT_LABEL "Sybase-PROP"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "salted FEAL-8 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH (6 + 56)
#define PREFIX_VALUE "0x"
#define PREFIX_LENGTH 2
#define BINARY_SIZE 56 / 2
#define BINARY_ALIGN 4
#define SALT_SIZE 1 // see the definition of generate_hash, note "unsigned char seed" argument
#define SALT_SIZE_HEX 2
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 128
static struct fmt_tests SybasePROP_tests[] = {
{"0x2905aeb3d00e3b80fb0695cb34c9fa9080f84ae1824b24cc51a3849dcb06", "test11"},
{"0x3f05fc3d526946d9936c63dd798c5fa1b980747b1d81d0b9b2e8197d2aca", "test12"},
{NULL}
};
static unsigned char saved_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
// One-time format setup: scale key counts for OpenMP and allocate the
// per-candidate key and hash-output buffers.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
if (omp_t > 1) {
// min gets the thread count; max additionally gets the OMP_SCALE factor
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
}
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
// Release the buffers allocated in init() (MEM_FREE also NULLs the pointers).
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
// Return 1 iff ciphertext looks like a Sybase-PROP hash: the "0x" prefix
// followed by exactly CIPHERTEXT_LENGTH-2 hex digits and nothing else.
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p = ciphertext + PREFIX_LENGTH;
int extra;
if (strncmp(ciphertext, PREFIX_VALUE, PREFIX_LENGTH))
return 0;
if (hexlenl(p, &extra) != CIPHERTEXT_LENGTH-PREFIX_LENGTH || extra)
return 0;
return 1;
}
// Decode the hex digest portion of the ciphertext into raw bytes.
// Returns a pointer to static storage (the john format API convention).
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy; // forces BINARY_ALIGN-compatible alignment
} buf;
unsigned char *out = buf.c;
char *p;
int i;
// skip "0x", the 2 salt hex digits, then 2 more hex chars
p = ciphertext + PREFIX_LENGTH + SALT_SIZE_HEX + 2; // last 2 bytes always seem to be "05"
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
// Extract the 1-byte salt: the first hex pair after the "0x" prefix.
// Returns a pointer to static storage (the john format API convention).
static void *get_salt(char *ciphertext)
{
char *p = ciphertext + PREFIX_LENGTH;
static unsigned char salt;
salt = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
return (void*)&salt;
}
// Install the 1-byte salt for subsequent crypt_all() calls.
static void set_salt(void *salt)
{
saved_salt = ((unsigned char*)salt)[0];
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
// Return the candidate password previously stored at 'index'.
static char *get_key(int index)
{
return saved_key[index];
}
// Hash every staged key with the current salt, filling crypt_out[];
// candidates are independent, so the loop parallelizes trivially.
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
generate_hash((unsigned char*)saved_key[index], saved_salt,
(unsigned char*)crypt_out[index]);
}
return count;
}
// Quick scan: does any computed hash match the target?
// NOTE(review): only ARCH_SIZE bytes (one machine word) are compared here,
// not the full BINARY_SIZE -- presumably an intentional fast partial check,
// with cmp_one() doing the full comparison; confirm ARCH_SIZE <= BINARY_SIZE.
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
// Full-width comparison of one computed hash against the target binary.
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
// Nothing further to verify beyond cmp_one(), so always report a match.
static int cmp_exact(char *source, int index)
{
return 1;
}
// Bucket functions for john's hash tables: successively wider masks of the
// first 32 bits of each computed hash.
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
// Format descriptor registered with john: static parameters first, then the
// method table (positional initialization per struct fmt_main).
struct fmt_main fmt_sybaseprop = {
{
// fmt_params: labels, lengths, alignment, and test vectors
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ PREFIX_VALUE },
SybasePROP_tests
}, {
// fmt_methods: lifecycle, parsing, hashing, and comparison callbacks
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
target_data_messages.c | // RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify=expected,omp45 -fopenmp -fopenmp-version=45 -ferror-limit 100 -o - %s -Wuninitialized
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify=expected,omp50 -fopenmp -fopenmp-version=50 -ferror-limit 100 -o - %s -Wuninitialized
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify=expected,omp45 -fopenmp-simd -fopenmp-version=45 -ferror-limit 100 -o - %s -Wuninitialized
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify=expected,omp50 -fopenmp-simd -fopenmp-version=50 -ferror-limit 100 -o - %s -Wuninitialized
// no-op function used as a statement target in the directive tests
void foo(void) { }
// -Wuninitialized interaction: 'map' is used uninitialized in the map clause.
// (clang -verify test: the expected-* comments must stay on their lines.)
void xxx(int argc) {
int map; // expected-note {{initialize the variable 'map' to silence this warning}}
#pragma omp target data map(map) // expected-warning {{variable 'map' is uninitialized when used here}}
for (int i = 0; i < 10; ++i)
;
}
// Diagnostic coverage for '#pragma omp target data': missing clauses,
// disallowed clauses/map-types, branching into/out of the region, and
// malformed array sections.  (clang -verify test: every expected-* comment
// must remain on its exact line.)
int main(int argc, char **argv) {
int a;
#pragma omp target data // omp45-error {{expected at least one 'map' or 'use_device_ptr' clause for '#pragma omp target data'}} omp50-error {{expected at least one 'map', 'use_device_ptr', or 'use_device_addr' clause for '#pragma omp target data'}}
{}
L1:
foo();
#pragma omp target data map(a) allocate(a) // expected-error {{unexpected OpenMP clause 'allocate' in directive '#pragma omp target data'}}
{
foo();
goto L1; // expected-error {{use of undeclared label 'L1'}}
}
goto L2; // expected-error {{use of undeclared label 'L2'}}
#pragma omp target data map(a)
L2:
foo();
#pragma omp target data map(a)(i) // expected-warning {{extra tokens at the end of '#pragma omp target data' are ignored}}
{
foo();
}
#pragma omp target unknown // expected-warning {{extra tokens at the end of '#pragma omp target' are ignored}}
{
foo();
}
#pragma omp target data map(delete: a) // expected-error {{map type 'delete' is not allowed for '#pragma omp target data'}}
{
foo();
}
#pragma omp target data map(release: a) // expected-error {{map type 'release' is not allowed for '#pragma omp target data'}}
{
foo();
}
const int b = 5;
int marr[10][10], iarr[5];
#pragma omp target data map(to: marr[10][0:2:2]) // expected-error {{expected ']'}} expected-note {{to match this '['}}
{}
#pragma omp target data map(alloc: iarr[:2:b]) // expected-error {{expected ']'}} expected-note {{to match this '['}}
{}
return 0;
}
|
mpdpush2.c | /* C Library for Skeleton 2-1/2D Darwin MPI/OpenMP PIC Code */
/* written by Viktor K. Decyk, UCLA */
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include <math.h>
#include "mpdpush2.h"
#include "mpplib2.h"
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------*/
double ranorm() {
/* this program calculates a random number y from a gaussian distribution
with zero mean and unit variance, according to the method of
mueller and box:
y(k) = (-2*ln(x(k)))**1/2*sin(2*pi*x(k+1))
y(k+1) = (-2*ln(x(k)))**1/2*cos(2*pi*x(k+1)),
where x is a random number uniformly distributed on (0,1).
written for the ibm by viktor k. decyk, ucla
local data */
/* r1/r2 and r4/r5 hold the state of two independent integer congruential
   streams; h1l/h1u/h2l are their multiplier halves, split so products fit
   in a double without losing low-order bits */
static int r1 = 885098780, r2 = 1824280461;
static int r4 = 1396483093, r5 = 55318673;
/* iflg/r0: Box-Mueller produces two deviates per pass; the second is
   cached in r0 and returned on the next call when iflg==1 */
static int iflg = 0;
static double h1l = 65531.0, h1u = 32767.0, h2l = 65525.0;
static double r0 = 0.0;
int isc, i1;
double ranorm, r3, asc, bsc, temp;
if (iflg==1) {
/* return the cached second deviate from the previous call */
ranorm = r0;
r0 = 0.0;
iflg = 0;
return ranorm;
}
/* advance the first stream (r1, r2) in 16-bit pieces; the exact order of
   these updates is part of the generator and must not be rearranged */
isc = 65536;
asc = (double) isc;
bsc = asc*asc;
i1 = r1 - (r1/isc)*isc;
r3 = h1l*(double) r1 + asc*h1u*(double) i1;
i1 = r3/bsc;
r3 -= ((double) i1)*bsc;
bsc = 0.5*bsc;
i1 = r2/isc;
isc = r2 - i1*isc;
r0 = h1l*(double) r2 + asc*h1u*(double) isc;
asc = 1.0/bsc;
isc = r0*asc;
r2 = r0 - ((double) isc)*bsc;
r3 += (double) isc + 2.0*h1u*(double) i1;
isc = r3*asc;
r1 = r3 - ((double) isc)*bsc;
/* radial part: sqrt(-2 ln u1) from the first stream's uniform */
temp = sqrt(-2.0*log((((double) r1) + ((double) r2)*asc)*asc));
/* advance the second stream (r4, r5) the same way */
isc = 65536;
asc = (double) isc;
bsc = asc*asc;
i1 = r4 - (r4/isc)*isc;
r3 = h2l*(double) r4 + asc*h1u*(double) i1;
i1 = r3/bsc;
r3 -= ((double) i1)*bsc;
bsc = 0.5*bsc;
i1 = r5/isc;
isc = r5 - i1*isc;
r0 = h2l*(double) r5 + asc*h1u*(double) isc;
asc = 1.0/bsc;
isc = r0*asc;
r5 = r0 - ((double) isc)*bsc;
r3 += (double) isc + 2.0*h1u*(double) i1;
isc = r3*asc;
r4 = r3 - ((double) isc)*bsc;
/* angular part 2*pi*u2: return sin now, cache cos for the next call */
r0 = 6.28318530717959*((((double) r4) + ((double) r5)*asc)*asc);
ranorm = temp*sin(r0);
r0 = temp*cos(r0);
iflg = 1;
return ranorm;
}
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------*/
void cpdicomp2l(float edges[], int *nyp, int *noff, int *nypmx,
int *nypmn, int ny, int kstrt, int nvp, int idps) {
/* this subroutine determines spatial boundaries for uniform particle
decomposition, calculates number of grid points in each spatial
region, and the offset of these grid points from the global address
nvp must be < ny. some combinations of ny and nvp result in a zero
value of nyp. this is not supported.
integer boundaries are set.
input: ny, kstrt, nvp, idps, output: edges, nyp, noff, nypmx, nypmn
edges[0] = lower boundary of particle partition
edges[1] = upper boundary of particle partition
nyp = number of primary (complete) gridpoints in particle partition
noff = lowermost global gridpoint in particle partition
nypmx = maximum size of particle partition, including guard cells
nypmn = minimum value of nyp
ny = system length in y direction
kstrt = starting data block number (processor id + 1)
nvp = number of real or virtual processors
idps = number of partition boundaries
local data */
int kb, kyp;
float at1, any;
int mypm[2], iwork2[2];
any = (float) ny;
/* determine decomposition */
/* kb = this processor's 0-based rank; kyp = grid rows per processor
   (ceiling of ny/nvp) */
kb = kstrt - 1;
kyp = (ny - 1)/nvp + 1;
at1 = (float) kyp;
/* lower edge, clamped to the global domain */
edges[0] = at1*(float) kb;
if (edges[0] > any)
edges[0] = any;
*noff = edges[0];
/* upper edge, clamped to the global domain */
edges[1] = at1*(float) (kb + 1);
if (edges[1] > any)
edges[1] = any;
kb = edges[1];
*nyp = kb - *noff;
/* find maximum/minimum partition size */
/* cppimax is a global reduction across processors; mypm[1] is negated so
   a single max-reduction yields both the max and the min of nyp */
mypm[0] = *nyp;
mypm[1] = -(*nyp);
cppimax(mypm,iwork2,2);
*nypmx = mypm[0] + 1;
*nypmn = -mypm[1];
return;
}
/*--------------------------------------------------------------------*/
void cpdistr2h(float part[], float edges[], int *npp, int nps,
               float vtx, float vty, float vtz, float vdx, float vdy,
               float vdz, int npx, int npy, int nx, int ny, int idimp,
               int npmax, int idps, int ipbc, int *ierr) {
/* for 2-1/2d code, this subroutine calculates initial particle
   co-ordinates and velocities with uniform density and maxwellian
   velocity with drift for distributed data.
   input: all except part, ierr, output: part, npp, ierr
   part[n][0] = position x of particle n in partition
   part[n][1] = position y of particle n in partition
   part[n][2] = velocity vx of particle n in partition
   part[n][3] = velocity vy of particle n in partition
   part[n][4] = velocity vz of particle n in partition
   edges[0] = lower boundary of particle partition
   edges[1] = upper boundary of particle partition
   npp = number of particles in partition
   nps = starting address of particles in partition (1-based)
   vtx/vty/vtz = thermal velocity of electrons in x/y/z direction
   vdx/vdy/vdz = drift velocity of beam electrons in x/y/z direction
   npx/npy = initial number of particles distributed in x/y direction
   nx/ny = system length in x/y direction
   idimp = size of phase space = 5
   npmax = maximum number of particles in each partition
   idps = number of partition boundaries
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
   ierr = (0,1) = (no,yes) error condition exists
   ranorm = gaussian random number with zero mean and unit variance
   with spatial decomposition
local data */
   int j, k, npt, k1, npxyp;
   float edgelx, edgely, at1, at2, xt, yt, vxt, vyt, vzt;
   double dnpx, dnpxy, dt1;
   int ierr1[1], iwork1[1];
   double sum4[4], work4[4];
   *ierr = 0;
/* particle distribution constant */
   dnpx = (double) npx;
/* set boundary values: reflecting boundaries exclude the outermost
   grid cell in the affected direction(s) */
   edgelx = 0.0;
   edgely = 0.0;
   at1 = (float) nx/(float) npx;
   at2 = (float) ny/(float) npy;
   if (ipbc==2) {
      edgelx = 1.0;
      edgely = 1.0;
      at1 = (float) (nx-2)/(float) npx;
      at2 = (float) (ny-2)/(float) npy;
   }
   else if (ipbc==3) {
      edgelx = 1.0;
      at1 = (float) (nx-2)/(float) npx;
   }
   npt = *npp;
/* uniform density profile: candidates placed at cell midpoints */
   for (k = 0; k < npy; k++) {
      yt = edgely + at2*(((float) k) + 0.5);
      for (j = 0; j < npx; j++) {
         xt = edgelx + at1*(((float) j) + 0.5);
/* maxwellian velocity distribution */
/* ranorm is called for every candidate on every processor, so the
   global random sequence stays identical across processors even
   though each keeps only particles inside its own partition */
         vxt = vtx*ranorm();
         vyt = vty*ranorm();
         vzt = vtz*ranorm();
         if ((yt >= edges[0]) && (yt < edges[1])) {
            if (npt < npmax) {
               k1 = idimp*npt;
               part[k1] = xt;
               part[1+k1] = yt;
               part[2+k1] = vxt;
               part[3+k1] = vyt;
               part[4+k1] = vzt;
               npt += 1;
            }
            else
/* partition overflow: count how many particles could not be stored */
               *ierr += 1;
         }
      }
   }
   npxyp = 0;
/* add correct drift: measure the mean velocity actually generated,
   then shift all particles so the global mean equals (vdx,vdy,vdz) */
   sum4[0] = 0.0;
   sum4[1] = 0.0;
   sum4[2] = 0.0;
   for (j = nps-1; j < npt; j++) {
      npxyp += 1;
      sum4[0] += part[2+idimp*j];
      sum4[1] += part[3+idimp*j];
      sum4[2] += part[4+idimp*j];
   }
   sum4[3] = npxyp;
/* global sums of velocities and particle count across processors */
   cppdsum(sum4,work4,4);
   dnpxy = sum4[3];
   ierr1[0] = *ierr;
   cppimax(ierr1,iwork1,1);
   *ierr = ierr1[0];
/* NOTE(review): if no particles were generated globally (dnpxy == 0)
   this divides by zero — presumably callers guarantee npx*npy > 0 */
   dt1 = 1.0/dnpxy;
   sum4[0] = dt1*sum4[0] - vdx;
   sum4[1] = dt1*sum4[1] - vdy;
   sum4[2] = dt1*sum4[2] - vdz;
   for (j = nps-1; j < npt; j++) {
      part[2+idimp*j] -= sum4[0];
      part[3+idimp*j] -= sum4[1];
      part[4+idimp*j] -= sum4[2];
   }
/* process errors: nonzero if the global count differs from npx*npy */
   dnpxy -= dnpx*(double) npy;
   if (dnpxy != 0.0)
      *ierr = dnpxy;
   *npp = npt;
   return;
}
/*--------------------------------------------------------------------*/
void cppdblkp2l(float part[], int kpic[], int npp, int noff, int *nppmx,
                int idimp, int npmax, int mx, int my, int mx1,
                int mxyp1, int *irc) {
/* find the maximum number of particles in each tile of size mx by my,
   to size the segmented particle array ppart.
   linear interpolation, spatial decomposition in y direction.
   input: all except kpic, nppmx, output: kpic, nppmx
   part = input particle array; part[n][0]/part[n][1] = x/y of particle n
   kpic = output number of particles per tile
   nppmx = returned maximum number of particles in any tile
   npp = number of particles in partition
   noff = lowermost global gridpoint in particle partition
   idimp = size of phase space = 4
   npmax = maximum number of particles in each partition
   mx/my = number of grids in sorting cell in x and y
   mx1 = (system length in x direction - 1)/mx + 1
   mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int i, k, ix, iy, tile, cnt, total, maxcnt, ovf;
   ovf = 0;
/* reset the per-tile counters */
   for (k = 0; k < mxyp1; k++)
      kpic[k] = 0;
/* tally particles into tiles: positions truncate to grid indices,
   which are divided by the tile size to get tile coordinates */
   for (i = 0; i < npp; i++) {
      ix = part[idimp*i];
      iy = part[1+idimp*i];
      ix = ix/mx;
      iy = (iy - noff)/my;
      tile = ix + mx1*iy;
      if (tile < mxyp1)
         kpic[tile] += 1;
      else {
/* out-of-range tile: remember the worst overflow distance */
         cnt = tile - mxyp1 + 1;
         if (cnt > ovf)
            ovf = cnt;
      }
   }
/* find the largest tile population and the total count */
   total = 0;
   maxcnt = 0;
   for (k = 0; k < mxyp1; k++) {
      cnt = kpic[k];
      if (cnt > maxcnt)
         maxcnt = cnt;
      total += cnt;
   }
   *nppmx = maxcnt;
/* report errors: tile overflow, or particles lost from the count */
   if (ovf > 0)
      *irc = ovf;
   else if (total != npp)
      *irc = -1;
   return;
}
/*--------------------------------------------------------------------*/
void cpppmovin2l(float part[], float ppart[], int kpic[], int npp,
                 int noff, int nppmx, int idimp, int npmax, int mx,
                 int my, int mx1, int mxyp1, int *irc) {
/* sort particles by x,y grid in tiles of mx by my and copy them into
   the segmented array ppart.
   linear interpolation, spatial decomposition in y direction.
   input: all except ppart, kpic, output: ppart, kpic
   part/ppart = input/output particle arrays
   part[n][0]/part[n][1] = x/y position of particle n in partition
   kpic = output number of particles per tile
   nppmx = maximum number of particles in tile
   npp = number of particles in partition
   noff = lowermost global gridpoint in particle partition
   idimp = size of phase space = 4
   npmax = maximum number of particles in each partition
   mx/my = number of grids in sorting cell in x and y
   mx1 = (system length in x direction - 1)/mx + 1
   mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int i, d, ix, iy, tile, slot, over, ovf;
   ovf = 0;
/* reset the per-tile fill counters */
   for (tile = 0; tile < mxyp1; tile++)
      kpic[tile] = 0;
/* place each particle at the next free slot of its tile */
   for (i = 0; i < npp; i++) {
      ix = part[idimp*i];
      iy = part[1+idimp*i];
      ix = ix/mx;
      iy = (iy - noff)/my;
      tile = ix + mx1*iy;
      slot = kpic[tile];
      if (slot < nppmx) {
         for (d = 0; d < idimp; d++)
            ppart[d+idimp*(slot+nppmx*tile)] = part[d+idimp*i];
      }
      else {
/* tile is full: remember the worst overflow */
         over = slot - nppmx + 1;
         if (over > ovf)
            ovf = over;
      }
      kpic[tile] = slot + 1;
   }
   if (ovf > 0)
      *irc = ovf;
   return;
}
/*--------------------------------------------------------------------*/
void cpppcheck2l(float ppart[], int kpic[], int noff, int nyp,
                 int idimp, int nppmx, int nx, int mx, int my, int mx1,
                 int myp1, int *irc) {
/* sanity check: verify that particles sorted by x,y grid into tiles of
   mx by my all lie within their tile's bounds.
   tiles are assumed to be arranged in 2D linear memory.
   input: all except irc, output: irc
   ppart[k][n][0]/ppart[k][n][1] = x/y position of particle n in tile k
   kpic[k] = number of reordered output particles in tile k
   noff = lowermost global gridpoint in particle partition
   nyp = number of primary (complete) gridpoints in particle partition
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx = system length in x direction
   mx/my = number of grids in sorting cell in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   myp1 = (partition length in y direction - 1)/my + 1
   irc = particle error, returned only if error occurs, when irc > 0
local data */
   int ntiles, k, j, xoff, yoff, count, wx, wy, flag;
   float xlo, xhi, ylo, yhi, px, py;
   ntiles = mx1*myp1;
/* loop over tiles */
#pragma omp parallel for \
private(j,k,xoff,yoff,count,wx,wy,flag,xlo,xhi,ylo,yhi,px,py)
   for (k = 0; k < ntiles; k++) {
/* tile origin in grid coordinates */
      yoff = k/mx1;
      xoff = mx*(k - mx1*yoff);
      yoff = my*yoff;
      count = kpic[k];
/* tile extent, clipped at the partition edges */
      wx = nx - xoff;
      if (wx > mx)
         wx = mx;
      wy = nyp - yoff;
      if (wy > my)
         wy = my;
      xlo = xoff;
      xhi = xoff + wx;
      ylo = noff + yoff;
      yhi = noff + yoff + wy;
/* loop over particles in tile, flagging any outside the bounds */
      for (j = 0; j < count; j++) {
         px = ppart[idimp*(j+nppmx*k)];
         py = ppart[1+idimp*(j+nppmx*k)];
         flag = 0;
         if (px < xlo)
            flag = 1;
         if (px >= xhi)
            flag = 2;
         if (py < ylo)
            flag += 3;
         if (py >= yhi)
            flag += 6;
         if (flag > 0)
            *irc = k + 1;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppgbppush23l(float ppart[], float fxy[], float bxy[], int kpic[],
                   int noff, int nyp, float qbm, float dt, float dtc,
                   float *ek, int idimp, int nppmx, int nx, int ny,
                   int mx, int my, int nxv, int nypmx, int mx1,
                   int mxyp1, int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, with magnetic field. Using the Boris Mover.
   OpenMP version using guard cells, for distributed data
   data deposited in tiles
   particles stored segmented array
   119 flops/particle, 1 divide, 29 loads, 5 stores
   input: all, output: ppart, ek
   velocity equations used are:
   vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fx(x(t),y(t))*dt)
   vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fy(x(t),y(t))*dt)
   vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fz(x(t),y(t))*dt)
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
   omz = (q/m)*bz(x(t),y(t)).
   position equations used are:
   x(t+dt)=x(t) + vx(t+dt/2)*dt
   y(t+dt)=y(t) + vy(t+dt/2)*dt
   fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
   bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
   are approximated by interpolation from the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
      + dx*fx(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
   ppart[m][n][0] = position x of particle n in partition in tile m
   ppart[m][n][1] = position y of particle n in partition in tile m
   ppart[m][n][2] = x velocity of particle n in partition in tile m
   ppart[m][n][3] = y velocity of particle n in partition in tile m
   ppart[m][n][4] = z velocity of particle n in partition in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,kk)
   fxy[k][j][1] = y component of force/charge at grid (j,kk)
   fxy[k][j][2] = z component of force/charge at grid (j,kk)
   that is, convolution of electric field over particle shape,
   where kk = k + noff
   bxy[k][j][0] = x component of magnetic field at grid (j,kk)
   bxy[k][j][1] = y component of magnetic field at grid (j,kk)
   bxy[k][j][2] = z component of magnetic field at grid (j,kk)
   that is, the convolution of magnetic field over particle shape,
   where kk = k + noff
   kpic = number of particles per tile
   noff = lowermost global gridpoint in particle partition.
   nyp = number of primary (complete) gridpoints in particle partition
   qbm = particle charge/mass ratio
   dt = time interval between successive calculations
   dtc = time interval between successive co-ordinate calculations
   kinetic energy/mass at time t is also calculated, using
   ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
   (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
   (vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
   idimp = size of phase space = 5
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of field arrays, must be >= nx+1
   nypmx = maximum size of particle partition, including guard cells.
   mx1 = (system length in x direction - 1)/mx + 1
   mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
#define MXV 33
#define MYV 33
   int noffp, moffp, npoff, nppp, mxv3;
   int mnoff, i, j, k, nn, mm, nm;
   float qtmh, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
   float dx, dy, dz, ox, oy, oz, acx, acy, acz, omxt, omyt, omzt, omt;
   float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
   float x, y;
/* sfxy/sbxy = per-thread private copies of the tile's E and B fields */
   float sfxy[3*MXV*MYV], sbxy[3*MXV*MYV];
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
   double sum1, sum2;
   mxv3 = 3*(mx + 1);
   qtmh = 0.5*qbm*dt;
   sum2 = 0.0;
/* set boundary values: only consulted for ipbc==2 (both directions)
   and ipbc==3 (x direction only) below */
   edgelx = 0.0f;
   edgely = 1.0f;
   edgerx = (float) (nx);
   edgery = (float) (ny-1);
   if ((ipbc==2) || (ipbc==3)) {
      edgelx = 1.0f;
      edgerx = (float) (nx-1);
   }
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noffp,moffp,nppp,npoff,nn,mm,nm,mnoff,x,y,dxp,dyp,amx, \
amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1,rot2, \
rot3,rot4,rot5,rot6,rot7,rot8,rot9,sum1,sfxy,sbxy) \
reduction(+:sum2)
   for (k = 0; k < mxyp1; k++) {
      noffp = k/mx1;
      moffp = my*noffp;
      noffp = mx*(k - mx1*noffp);
      nppp = kpic[k];
      mnoff = moffp + noff;
      npoff = nppmx*k;
/* load local fields from global array */
/* nn, mm = tile extent plus one guard column/row */
      nn = (mx < nx-noffp ? mx : nx-noffp) + 1;
      mm = (my < nyp-moffp ? my : nyp-moffp) + 1;
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[3*i+mxv3*j] = fxy[3*(i+noffp+nxv*(j+moffp))];
            sfxy[1+3*i+mxv3*j] = fxy[1+3*(i+noffp+nxv*(j+moffp))];
            sfxy[2+3*i+mxv3*j] = fxy[2+3*(i+noffp+nxv*(j+moffp))];
         }
      }
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sbxy[3*i+mxv3*j] = bxy[3*(i+noffp+nxv*(j+moffp))];
            sbxy[1+3*i+mxv3*j] = bxy[1+3*(i+noffp+nxv*(j+moffp))];
            sbxy[2+3*i+mxv3*j] = bxy[2+3*(i+noffp+nxv*(j+moffp))];
         }
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < nppp; j++) {
/* find interpolation weights: nn,mm = leftmost grid point (truncated),
   dxp,dyp = fractional offsets inside the cell */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
/* nm = flattened base index of the particle's cell in the local copy */
         nm = 3*(nn - noffp) + mxv3*(mm - mnoff);
         amx = 1.0 - dxp;
         amy = 1.0 - dyp;
/* find electric field */
         nn = nm;
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx = amy*(dxp*sfxy[mm] + dx);
         dy = amy*(dxp*sfxy[mm+1] + dy);
         dz = amy*(dxp*sfxy[mm+2] + dz);
         nn += mxv3;
         acx = amx*sfxy[nn];
         acy = amx*sfxy[nn+1];
         acz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx += dyp*(dxp*sfxy[mm] + acx);
         dy += dyp*(dxp*sfxy[mm+1] + acy);
         dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
         nn = nm;
         ox = amx*sbxy[nn];
         oy = amx*sbxy[nn+1];
         oz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox = amy*(dxp*sbxy[mm] + ox);
         oy = amy*(dxp*sbxy[mm+1] + oy);
         oz = amy*(dxp*sbxy[mm+2] + oz);
         nn += mxv3;
         acx = amx*sbxy[nn];
         acy = amx*sbxy[nn+1];
         acz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox += dyp*(dxp*sbxy[mm] + acx);
         oy += dyp*(dxp*sbxy[mm+1] + acy);
         oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
         dx *= qtmh;
         dy *= qtmh;
         dz *= qtmh;
/* half acceleration */
         acx = ppart[2+idimp*(j+npoff)] + dx;
         acy = ppart[3+idimp*(j+npoff)] + dy;
         acz = ppart[4+idimp*(j+npoff)] + dz;
/* time-centered kinetic energy */
         sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
         omxt = qtmh*ox;
         omyt = qtmh*oy;
         omzt = qtmh*oz;
/* calculate rotation matrix (Boris rotation about the local B field) */
         omt = omxt*omxt + omyt*omyt + omzt*omzt;
         anorm = 2.0/(1.0 + omt);
         omt = 0.5*(1.0 - omt);
         rot4 = omxt*omyt;
         rot7 = omxt*omzt;
         rot8 = omyt*omzt;
         rot1 = omt + omxt*omxt;
         rot5 = omt + omyt*omyt;
         rot9 = omt + omzt*omzt;
         rot2 = omzt + rot4;
         rot4 -= omzt;
         rot3 = -omyt + rot7;
         rot7 += omyt;
         rot6 = omxt + rot8;
         rot8 -= omxt;
/* new velocity */
         dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
         dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
         dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
         ppart[2+idimp*(j+npoff)] = dx;
         ppart[3+idimp*(j+npoff)] = dy;
         ppart[4+idimp*(j+npoff)] = dz;
/* new position */
         dx = x + dx*dtc;
         dy = y + dy*dtc;
/* reflecting boundary conditions: restore old position, reverse the
   corresponding velocity component */
         if (ipbc==2) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = ppart[1+idimp*(j+npoff)];
               ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)];
            }
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
      }
      sum2 += sum1;
   }
/* normalize kinetic energy */
   *ek += 0.5*sum2;
   return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cppgbppushf23l(float ppart[], float fxy[], float bxy[], int kpic[],
                    int ncl[], int ihole[], int noff, int nyp,
                    float qbm, float dt, float dtc, float *ek,
                    int idimp, int nppmx, int nx, int ny,
                    int mx, int my, int nxv, int nypmx, int mx1,
                    int mxyp1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, with magnetic field. Using the Boris Mover.
   with periodic boundary conditions.
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells, for distributed data
   data deposited in tiles
   particles stored segmented array
   119 flops/particle, 1 divide, 29 loads, 5 stores
   input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
   velocity equations used are:
   vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fx(x(t),y(t))*dt)
   vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fy(x(t),y(t))*dt)
   vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fz(x(t),y(t))*dt)
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
   omz = (q/m)*bz(x(t),y(t)).
   position equations used are:
   x(t+dt)=x(t) + vx(t+dt/2)*dt
   y(t+dt)=y(t) + vy(t+dt/2)*dt
   fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
   bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
   are approximated by interpolation from the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
      + dx*fx(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
   ppart[m][n][0] = position x of particle n in partition in tile m
   ppart[m][n][1] = position y of particle n in partition in tile m
   ppart[m][n][2] = x velocity of particle n in partition in tile m
   ppart[m][n][3] = y velocity of particle n in partition in tile m
   ppart[m][n][4] = z velocity of particle n in partition in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,kk)
   fxy[k][j][1] = y component of force/charge at grid (j,kk)
   fxy[k][j][2] = z component of force/charge at grid (j,kk)
   that is, convolution of electric field over particle shape,
   where kk = k + noff
   bxy[k][j][0] = x component of magnetic field at grid (j,kk)
   bxy[k][j][1] = y component of magnetic field at grid (j,kk)
   bxy[k][j][2] = z component of magnetic field at grid (j,kk)
   that is, the convolution of magnetic field over particle shape,
   where kk = k + noff
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   noff = lowermost global gridpoint in particle partition.
   nyp = number of primary (complete) gridpoints in particle partition
   qbm = particle charge/mass ratio
   dt = time interval between successive calculations
   dtc = time interval between successive co-ordinate calculations
   kinetic energy/mass at time t is also calculated, using
   ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
   (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
   (vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
   idimp = size of phase space = 5
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of field arrays, must be >= nx+1
   nypmx = maximum size of particle partition, including guard cells.
   mx1 = (system length in x direction - 1)/mx + 1
   mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
local data */
#define MXV 33
#define MYV 33
   int noffp, moffp, npoff, nppp, mxv3;
   int mnoff, i, j, k, ih, nh, nn, mm, nm;
   float qtmh, dxp, dyp, amx, amy;
   float dx, dy, dz, ox, oy, oz, acx, acy, acz, omxt, omyt, omzt, omt;
   float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
   float anx, any, edgelx, edgely, edgerx, edgery;
   float x, y;
/* sfxy/sbxy = per-thread private copies of the tile's E and B fields */
   float sfxy[3*MXV*MYV], sbxy[3*MXV*MYV];
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
   double sum1, sum2;
   mxv3 = 3*(mx + 1);
   qtmh = 0.5*qbm*dt;
   anx = (float) nx;
   any = (float) ny;
   sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noffp,moffp,nppp,npoff,nn,mm,nm,ih,nh,mnoff,x,y,dxp,dyp, \
amx,amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1, \
rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,edgelx,edgely,edgerx,edgery, \
sum1,sfxy,sbxy) \
reduction(+:sum2)
   for (k = 0; k < mxyp1; k++) {
      noffp = k/mx1;
      moffp = my*noffp;
      noffp = mx*(k - mx1*noffp);
      nppp = kpic[k];
/* tile bounds (clipped at partition edges), used to detect particles
   leaving this tile */
      nn = nx - noffp;
      nn = mx < nn ? mx : nn;
      mm = nyp - moffp;
      mm = my < mm ? my : mm;
      edgelx = noffp;
      edgerx = noffp + nn;
      edgely = noff + moffp;
      edgery = noff + moffp + mm;
      ih = 0;
      nh = 0;
      nn += 1;
      mm += 1;
      mnoff = moffp + noff;
      npoff = nppmx*k;
/* load local fields from global array */
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[3*i+mxv3*j] = fxy[3*(i+noffp+nxv*(j+moffp))];
            sfxy[1+3*i+mxv3*j] = fxy[1+3*(i+noffp+nxv*(j+moffp))];
            sfxy[2+3*i+mxv3*j] = fxy[2+3*(i+noffp+nxv*(j+moffp))];
         }
      }
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sbxy[3*i+mxv3*j] = bxy[3*(i+noffp+nxv*(j+moffp))];
            sbxy[1+3*i+mxv3*j] = bxy[1+3*(i+noffp+nxv*(j+moffp))];
            sbxy[2+3*i+mxv3*j] = bxy[2+3*(i+noffp+nxv*(j+moffp))];
         }
      }
/* clear counters: ncl[8*k..8*k+7] count the 8 possible exit
   directions for tile k */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < nppp; j++) {
/* find interpolation weights: nn,mm = leftmost grid point (truncated),
   dxp,dyp = fractional offsets inside the cell */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
/* nm = flattened base index of the particle's cell in the local copy */
         nm = 3*(nn - noffp) + mxv3*(mm - mnoff);
         amx = 1.0 - dxp;
         amy = 1.0 - dyp;
/* find electric field */
         nn = nm;
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx = amy*(dxp*sfxy[mm] + dx);
         dy = amy*(dxp*sfxy[mm+1] + dy);
         dz = amy*(dxp*sfxy[mm+2] + dz);
         nn += mxv3;
         acx = amx*sfxy[nn];
         acy = amx*sfxy[nn+1];
         acz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx += dyp*(dxp*sfxy[mm] + acx);
         dy += dyp*(dxp*sfxy[mm+1] + acy);
         dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
         nn = nm;
         ox = amx*sbxy[nn];
         oy = amx*sbxy[nn+1];
         oz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox = amy*(dxp*sbxy[mm] + ox);
         oy = amy*(dxp*sbxy[mm+1] + oy);
         oz = amy*(dxp*sbxy[mm+2] + oz);
         nn += mxv3;
         acx = amx*sbxy[nn];
         acy = amx*sbxy[nn+1];
         acz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox += dyp*(dxp*sbxy[mm] + acx);
         oy += dyp*(dxp*sbxy[mm+1] + acy);
         oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
         dx *= qtmh;
         dy *= qtmh;
         dz *= qtmh;
/* half acceleration */
         acx = ppart[2+idimp*(j+npoff)] + dx;
         acy = ppart[3+idimp*(j+npoff)] + dy;
         acz = ppart[4+idimp*(j+npoff)] + dz;
/* time-centered kinetic energy */
         sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
         omxt = qtmh*ox;
         omyt = qtmh*oy;
         omzt = qtmh*oz;
/* calculate rotation matrix (Boris rotation about the local B field) */
         omt = omxt*omxt + omyt*omyt + omzt*omzt;
         anorm = 2.0/(1.0 + omt);
         omt = 0.5*(1.0 - omt);
         rot4 = omxt*omyt;
         rot7 = omxt*omzt;
         rot8 = omyt*omzt;
         rot1 = omt + omxt*omxt;
         rot5 = omt + omyt*omyt;
         rot9 = omt + omzt*omzt;
         rot2 = omzt + rot4;
         rot4 -= omzt;
         rot3 = -omyt + rot7;
         rot7 += omyt;
         rot6 = omxt + rot8;
         rot8 -= omxt;
/* new velocity */
         dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
         dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
         dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
         ppart[2+idimp*(j+npoff)] = dx;
         ppart[3+idimp*(j+npoff)] = dy;
         ppart[4+idimp*(j+npoff)] = dz;
/* new position */
         dx = x + dx*dtc;
         dy = y + dy*dtc;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going: 1=left, 2=right in x;
   +3=down, +6=up in y; 0 = staying in this tile */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
/* guard against roundoff leaving dx exactly at anx after the wrap */
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
/* record the (1-based) particle index and its exit direction */
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
      sum2 += sum1;
/* set error and end of file flag */
/* ihole overflow: report the overflow count in irc and store a
   negative hole count as the in-band error marker */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
/* normalize kinetic energy */
   *ek += 0.5*sum2;
   return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cppgppost2l(float ppart[], float q[], int kpic[], int noff,
                 float qm, int idimp, int nppmx, int mx, int my,
                 int nxv, int nypmx, int mx1, int mxyp1) {
/* for 2d code, this subroutine calculates particle charge density
   using first-order linear interpolation, periodic boundaries.
   OpenMP version using guard cells, for distributed data;
   data deposited in tiles, particles stored in a segmented array.
   17 flops/particle, 6 loads, 4 stores
   input: all, output: q
   charge density is approximated by values at the nearest grid points:
   q(n,m)=qm*(1.-dx)*(1.-dy), q(n+1,m)=qm*dx*(1.-dy)
   q(n,m+1)=qm*(1.-dx)*dy,    q(n+1,m+1)=qm*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   ppart[m][n][0]/ppart[m][n][1] = x/y position of particle n in tile m
   q[k][j] = charge density at grid point (j,kk), where kk = k + noff
   kpic = number of particles per tile
   noff = lowermost global gridpoint in particle partition
   qm = charge on particle, in units of e
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of charge array, must be >= nx+1
   nypmx = maximum size of particle partition, including guard cells
   mx1 = (system length in x direction - 1)/mx + 1
   mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
local data */
#define MXV 33
#define MYV 33
   int xoff, yoff, pbase, count, row;
   int loff, i, j, k, ix, iy;
   float px, py, fx, fy, wx, wy;
/* sq = per-thread private charge accumulator for one tile */
   float sq[MXV*MYV];
/* float sq[(mx+1)*(my+1)]; */
   row = mx + 1;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,xoff,yoff,count,pbase,loff,ix,iy,px,py,fx,fy,wx,wy,sq)
   for (k = 0; k < mxyp1; k++) {
/* tile origin in grid coordinates */
      yoff = k/mx1;
      xoff = mx*(k - mx1*yoff);
      yoff = my*yoff;
      count = kpic[k];
      pbase = nppmx*k;
      loff = yoff + noff;
/* clear the local accumulator */
      for (j = 0; j < my+1; j++) {
         for (i = 0; i < mx+1; i++) {
            sq[i+row*j] = 0.0f;
         }
      }
/* accumulate charge from every particle in this tile */
      for (j = 0; j < count; j++) {
         px = ppart[idimp*(j+pbase)];
         py = ppart[1+idimp*(j+pbase)];
/* truncate to the leftmost grid point; fx,fy = qm-weighted offsets */
         ix = px;
         iy = py;
         fx = qm*(px - (float) ix);
         fy = py - (float) iy;
         ix = ix - xoff + row*(iy - loff);
         wx = qm - fx;
         wy = 1.0f - fy;
/* deposit charge at the four surrounding local grid points */
         px = sq[ix] + wx*wy;
         py = sq[ix+1] + fx*wy;
         sq[ix] = px;
         sq[ix+1] = py;
         ix += row;
         px = sq[ix] + wx*fy;
         py = sq[ix+1] + fx*fy;
         sq[ix] = px;
         sq[ix+1] = py;
      }
/* interior points belong to this tile only: add without atomics */
      ix = nxv - xoff;
      iy = nypmx - yoff;
      if (ix > mx)
         ix = mx;
      if (iy > my)
         iy = my;
      for (j = 1; j < iy; j++) {
         for (i = 1; i < ix; i++) {
            q[i+xoff+nxv*(j+yoff)] += sq[i+row*j];
         }
      }
/* edge rows may be shared with neighboring tiles: use atomics */
      iy = nypmx - yoff;
      if (iy > my+1)
         iy = my + 1;
      for (i = 1; i < ix; i++) {
#pragma omp atomic
         q[i+xoff+nxv*yoff] += sq[i];
         if (iy > my) {
#pragma omp atomic
            q[i+xoff+nxv*(iy+yoff-1)] += sq[i+row*(iy-1)];
         }
      }
/* edge columns may be shared with neighboring tiles: use atomics */
      ix = nxv - xoff;
      if (ix > mx+1)
         ix = mx + 1;
      for (j = 0; j < iy; j++) {
#pragma omp atomic
         q[xoff+nxv*(j+yoff)] += sq[row*j];
         if (ix > mx) {
#pragma omp atomic
            q[ix+xoff-1+nxv*(j+yoff)] += sq[ix-1+row*j];
         }
      }
   }
   return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cppgjppost2l(float ppart[], float cu[], int kpic[], int noff,
float qm, float dt, int nppmx, int idimp, int nx,
int ny, int mx, int my, int nxv, int nypmx, int mx1,
int mxyp1, int ipbc) {
/* for 2-1/2d code, this subroutine calculates particle current density
using first-order linear interpolation
in addition, particle positions are advanced a half time-step
OpenMP version using guard cells, for distributed data
data deposited in tiles
particles stored segmented array
41 flops/particle, 17 loads, 14 stores
input: all, output: ppart, cu
current density is approximated by values at the nearest grid points
cu(i,n,m)=qci*(1.-dx)*(1.-dy)
cu(i,n+1,m)=qci*dx*(1.-dy)
cu(i,n,m+1)=qci*(1.-dx)*dy
cu(i,n+1,m+1)=qci*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
and qci = qm*vi, where i = x,y,z
ppart[m][n][0] = position x of particle n in partition in tile m
ppart[m][n][1] = position y of particle n in partition in tile m
ppart[m][n][2] = x velocity of particle n in partition in tile m
ppart[m][n][3] = y velocity of particle n in partition in tile m
ppart[m][n][4] = z velocity of particle n in partition in tile m
cu[k][j][i] = ith component of current density at grid point (j,kk),
where kk = k + noff
kpic = number of particles per tile
noff = lowermost global gridpoint in particle partition.
qm = charge on particle, in units of e
dt = time interval between successive calculations
nppmx = maximum number of particles in tile
idimp = size of phase space = 5
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of current array, must be >= nx+1
nypmx = maximum size of particle partition, including guard cells.
mx1 = (system length in x direction - 1)/mx + 1
mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
#define MXV 33
#define MYV 33
int noffp, moffp, npoff, nppp, mxv3;
int mnoff, i, j, k, nn, mm;
float edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy, vz;
float scu[3*MXV*MYV];
/* float scu[3*(mx+1)*(my+1)]; */
/* mxv3 = row stride, in floats, of the 3-component tile accumulator scu */
mxv3 = 3*(mx + 1);
/* set boundary values */
/* edgely/edgery are read only by the y reflection test under ipbc==2 */
edgelx = 0.0f;
edgely = 1.0f;
edgerx = (float) (nx);
edgery = (float) (ny-1);
if ((ipbc==2) || (ipbc==3)) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noffp,moffp,nppp,npoff,nn,mm,mnoff,x,y,dxp,dyp,amx,amy, \
dx,dy,vx,vy,vz,scu)
for (k = 0; k < mxyp1; k++) {
/* (noffp,moffp) = lower-left grid offset of tile k within the partition */
noffp = k/mx1;
moffp = my*noffp;
noffp = mx*(k - mx1*noffp);
nppp = kpic[k];
mnoff = moffp + noff;
npoff = nppmx*k;
/* zero out local accumulator */
for (j = 0; j < mxv3*(my+1); j++) {
scu[j] = 0.0f;
}
/* loop over particles in tile */
for (j = 0; j < nppp; j++) {
/* find interpolation weights */
x = ppart[idimp*(j+npoff)];
y = ppart[1+idimp*(j+npoff)];
nn = x;
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
/* nn = flattened scu index of the lower-left point of the 2x2 stencil */
nn = 3*(nn - noffp) + mxv3*(mm - mnoff);
/* note: dxp and amx already carry the charge factor qm */
amx = qm - dxp;
amy = 1.0 - dyp;
/* deposit current */
dx = amx*amy;
dy = dxp*amy;
vx = ppart[2+idimp*(j+npoff)];
vy = ppart[3+idimp*(j+npoff)];
vz = ppart[4+idimp*(j+npoff)];
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = amx*dyp;
mm = nn + 3;
scu[mm] += vx*dy;
scu[mm+1] += vy*dy;
scu[mm+2] += vz*dy;
dy = dxp*dyp;
nn += mxv3;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
mm = nn + 3;
scu[mm] += vx*dy;
scu[mm+1] += vy*dy;
scu[mm+2] += vz*dy;
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[idimp*(j+npoff)];
ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
}
if ((dy < edgely) || (dy >= edgery)) {
dy = ppart[1+idimp*(j+npoff)];
ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)];
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[idimp*(j+npoff)];
ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
}
}
/* set new position */
ppart[idimp*(j+npoff)] = dx;
ppart[1+idimp*(j+npoff)] = dy;
}
/* deposit current to interior points in global array */
/* interior points are owned by this tile alone, so no atomics needed */
nn = nxv - noffp;
mm = nypmx - moffp;
nn = mx < nn ? mx : nn;
mm = my < mm ? my : mm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
cu[3*(i+noffp+nxv*(j+moffp))] += scu[3*i+mxv3*j];
cu[1+3*(i+noffp+nxv*(j+moffp))] += scu[1+3*i+mxv3*j];
cu[2+3*(i+noffp+nxv*(j+moffp))] += scu[2+3*i+mxv3*j];
}
}
/* deposit current to edge points in global array */
/* edge (guard) points can be written by neighboring tiles running in */
/* other threads, so these updates must be atomic                     */
mm = nypmx - moffp;
mm = my+1 < mm ? my+1 : mm;
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[3*(i+noffp+nxv*moffp)] += scu[3*i];
#pragma omp atomic
cu[1+3*(i+noffp+nxv*moffp)] += scu[1+3*i];
#pragma omp atomic
cu[2+3*(i+noffp+nxv*moffp)] += scu[2+3*i];
if (mm > my) {
#pragma omp atomic
cu[3*(i+noffp+nxv*(mm+moffp-1))] += scu[3*i+mxv3*(mm-1)];
#pragma omp atomic
cu[1+3*(i+noffp+nxv*(mm+moffp-1))] += scu[1+3*i+mxv3*(mm-1)];
#pragma omp atomic
cu[2+3*(i+noffp+nxv*(mm+moffp-1))] += scu[2+3*i+mxv3*(mm-1)];
}
}
nn = nxv - noffp;
nn = mx+1 < nn ? mx+1 : nn;
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[3*(noffp+nxv*(j+moffp))] += scu[mxv3*j];
#pragma omp atomic
cu[1+3*(noffp+nxv*(j+moffp))] += scu[1+mxv3*j];
#pragma omp atomic
cu[2+3*(noffp+nxv*(j+moffp))] += scu[2+mxv3*j];
if (nn > mx) {
#pragma omp atomic
cu[3*(nn+noffp-1+nxv*(j+moffp))] += scu[3*(nn-1)+mxv3*j];
#pragma omp atomic
cu[1+3*(nn+noffp-1+nxv*(j+moffp))] += scu[1+3*(nn-1)+mxv3*j];
#pragma omp atomic
cu[2+3*(nn+noffp-1+nxv*(j+moffp))] += scu[2+3*(nn-1)+mxv3*j];
}
}
}
return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cppgmjppost2l(float ppart[], float amu[], int kpic[], int noff,
float qm, int nppmx, int idimp, int mx, int my,
int nxv, int nypmx, int mx1, int mxyp1) {
/* for 2-1/2d code, this subroutine calculates particle momentum flux
using first-order spline interpolation
OpenMP version using guard cells, for distributed data
data deposited in tiles
particles stored segmented array
51 flops/particle, 21 loads, 16 stores
input: all, output: ppart, amu
momentum flux is approximated by values at the nearest grid points
amu(i,n,m)=qci*(1.-dx)*(1.-dy)
amu(i,n+1,m)=qci*dx*(1.-dy)
amu(i,n,m+1)=qci*(1.-dx)*dy
amu(i,n+1,m+1)=qci*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
and qci = qm*vj*vk, where jk = xx-yy,xy,zx,zy, for i = 1, 4
where vj = vj(t-dt/2) and vk = vk(t-dt/2)
ppart[m][n][0] = position x of particle n in partition in tile m at t
ppart[m][n][1] = position y of particle n in partition in tile m at t
ppart[m][n][2] = x velocity of particle n in partition in tile m
at t - dt/2
ppart[m][n][3] = y velocity of particle n in partition in tile m
at t - dt/2
ppart[m][n][4] = z velocity of particle n in partition in tile m
at t - dt/2
amu[k][j][i] = ith component of momentum flux at grid point j,kk
where kk = k + noff
kpic = number of particles per tile
noff = lowermost global gridpoint in particle partition.
qm = charge on particle, in units of e
nppmx = maximum number of particles in tile
idimp = size of phase space = 5
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of current array, must be >= nx+1
nypmx = maximum size of particle partition, including guard cells.
mx1 = (system length in x direction - 1)/mx + 1
mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
local data */
#define MXV 33
#define MYV 33
int noffp, moffp, npoff, nppp, mxv4;
int mnoff, i, j, k, nn, mm;
float dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy, vz, v1, v2, v3, v4;
float samu[4*MXV*MYV];
/* float samu[4*(mx+1)*(my+1)]; */
/* mxv4 = row stride, in floats, of the 4-component tile accumulator samu */
mxv4 = 4*(mx + 1);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noffp,moffp,nppp,npoff,nn,mm,mnoff,x,y,dxp,dyp,amx,amy, \
dx,dy,vx,vy,vz,v1,v2,v3,v4,samu)
for (k = 0; k < mxyp1; k++) {
/* (noffp,moffp) = lower-left grid offset of tile k within the partition */
noffp = k/mx1;
moffp = my*noffp;
noffp = mx*(k - mx1*noffp);
nppp = kpic[k];
mnoff = moffp + noff;
npoff = nppmx*k;
/* zero out local accumulator */
for (j = 0; j < mxv4*(my+1); j++) {
samu[j] = 0.0f;
}
/* loop over particles in tile */
for (j = 0; j < nppp; j++) {
/* find interpolation weights */
x = ppart[idimp*(j+npoff)];
y = ppart[1+idimp*(j+npoff)];
nn = x;
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
/* nn = flattened samu index of the lower-left point of the 2x2 stencil */
nn = 4*(nn - noffp) + mxv4*(mm - mnoff);
/* note: dxp and amx already carry the charge factor qm */
amx = qm - dxp;
amy = 1.0 - dyp;
/* deposit momentum flux */
dx = amx*amy;
dy = dxp*amy;
vx = ppart[2+idimp*(j+npoff)];
vy = ppart[3+idimp*(j+npoff)];
vz = ppart[4+idimp*(j+npoff)];
/* flux components: v1 = vx*vx-vy*vy, v2 = vx*vy, v3 = vz*vx, v4 = vz*vy */
v1 = vx*vx - vy*vy;
v2 = vx*vy;
v3 = vz*vx;
v4 = vz*vy;
samu[nn] += v1*dx;
samu[nn+1] += v2*dx;
samu[nn+2] += v3*dx;
samu[nn+3] += v4*dx;
dx = amx*dyp;
mm = nn + 4;
samu[mm] += v1*dy;
samu[mm+1] += v2*dy;
samu[mm+2] += v3*dy;
samu[mm+3] += v4*dy;
dy = dxp*dyp;
nn += mxv4;
samu[nn] += v1*dx;
samu[nn+1] += v2*dx;
samu[nn+2] += v3*dx;
samu[nn+3] += v4*dx;
mm = nn + 4;
samu[mm] += v1*dy;
samu[mm+1] += v2*dy;
samu[mm+2] += v3*dy;
samu[mm+3] += v4*dy;
}
/* deposit momentum flux to interior points in global array */
/* interior points are owned by this tile alone, so no atomics needed */
nn = nxv - noffp;
mm = nypmx - moffp;
nn = mx < nn ? mx : nn;
mm = my < mm ? my : mm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
amu[4*(i+noffp+nxv*(j+moffp))] += samu[4*i+mxv4*j];
amu[1+4*(i+noffp+nxv*(j+moffp))] += samu[1+4*i+mxv4*j];
amu[2+4*(i+noffp+nxv*(j+moffp))] += samu[2+4*i+mxv4*j];
amu[3+4*(i+noffp+nxv*(j+moffp))] += samu[3+4*i+mxv4*j];
}
}
/* deposit momentum flux to edge points in global array */
/* edge (guard) points can be written by neighboring tiles running in */
/* other threads, so these updates must be atomic                     */
mm = nypmx - moffp;
mm = my+1 < mm ? my+1 : mm;
for (i = 1; i < nn; i++) {
#pragma omp atomic
amu[4*(i+noffp+nxv*moffp)] += samu[4*i];
#pragma omp atomic
amu[1+4*(i+noffp+nxv*moffp)] += samu[1+4*i];
#pragma omp atomic
amu[2+4*(i+noffp+nxv*moffp)] += samu[2+4*i];
#pragma omp atomic
amu[3+4*(i+noffp+nxv*moffp)] += samu[3+4*i];
if (mm > my) {
#pragma omp atomic
amu[4*(i+noffp+nxv*(mm+moffp-1))] += samu[4*i+mxv4*(mm-1)];
#pragma omp atomic
amu[1+4*(i+noffp+nxv*(mm+moffp-1))] += samu[1+4*i+mxv4*(mm-1)];
#pragma omp atomic
amu[2+4*(i+noffp+nxv*(mm+moffp-1))] += samu[2+4*i+mxv4*(mm-1)];
#pragma omp atomic
amu[3+4*(i+noffp+nxv*(mm+moffp-1))] += samu[3+4*i+mxv4*(mm-1)];
}
}
nn = nxv - noffp;
nn = mx+1 < nn ? mx+1 : nn;
for (j = 0; j < mm; j++) {
#pragma omp atomic
amu[4*(noffp+nxv*(j+moffp))] += samu[mxv4*j];
#pragma omp atomic
amu[1+4*(noffp+nxv*(j+moffp))] += samu[1+mxv4*j];
#pragma omp atomic
amu[2+4*(noffp+nxv*(j+moffp))] += samu[2+mxv4*j];
#pragma omp atomic
amu[3+4*(noffp+nxv*(j+moffp))] += samu[3+mxv4*j];
if (nn > mx) {
#pragma omp atomic
amu[4*(nn+noffp-1+nxv*(j+moffp))] += samu[4*(nn-1)+mxv4*j];
#pragma omp atomic
amu[1+4*(nn+noffp-1+nxv*(j+moffp))] += samu[1+4*(nn-1)+mxv4*j];
#pragma omp atomic
amu[2+4*(nn+noffp-1+nxv*(j+moffp))] += samu[2+4*(nn-1)+mxv4*j];
#pragma omp atomic
amu[3+4*(nn+noffp-1+nxv*(j+moffp))] += samu[3+4*(nn-1)+mxv4*j];
}
}
}
return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cppgdjppost2l(float ppart[], float fxy[], float bxy[], float dcu[],
float amu[], int kpic[], int noff, int nyp, float qm,
float qbm, float dt, int idimp, int nppmx, int nx,
int mx, int my, int nxv, int nypmx, int mx1,
int mxyp1) {
/* for 2-1/2d code, this subroutine calculates particle momentum flux
and acceleration density using first-order spline interpolation.
OpenMP version using guard cells, for distributed data
data deposited in tiles
particles stored segmented array
194 flops/particle, 1 divide, 57 loads, 28 stores
input: all, output: dcu, amu
acceleration density is approximated by values at the nearest grid
points
dcu(i,n,m)=qci*(1.-dx)*(1.-dy)
dcu(i,n+1,m)=qci*dx*(1.-dy)
dcu(i,n,m+1)=qci*(1.-dx)*dy
dcu(i,n+1,m+1)=qci*dx*dy
and qci = qm*dvj/dt, where j = x,y,z, for i = 1, 3
where dvj = (vj(t+dt/2)-vj(t-dt/2))/dt
momentum flux is approximated by values at the nearest grid points
amu(i,n,m)=qci*(1.-dx)*(1.-dy)
amu(i,n+1,m)=qci*dx*(1.-dy)
amu(i,n,m+1)=qci*(1.-dx)*dy
amu(i,n+1,m+1)=qci*dx*dy
and qci = qm*vj*vk, where jk = xx-yy,xy,zx,zy, for i = 1, 4
where vj = 0.5*(vj(t+dt/2)+vj(t-dt/2),
and vk = 0.5*(vk(t+dt/2)+vk(t-dt/2))
where n,m = nearest grid points and dx = x-n, dy = y-m
velocity equations at t=t+dt/2 are calculated from:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt)
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt)
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt)
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
omz = (q/m)*bz(x(t),y(t)).
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][n][0] = position x of particle n in partition in tile m at t
ppart[m][n][1] = position y of particle n in partition in tile m at t
ppart[m][n][2] = x velocity of particle n in partition in tile m
at t - dt/2
ppart[m][n][3] = y velocity of particle n in partition in tile m
at t - dt/2
ppart[m][n][4] = z velocity of particle n in partition in tile m
at t - dt/2
fxy[k][j][0] = x component of force/charge at grid (j,kk)
fxy[k][j][1] = y component of force/charge at grid (j,kk)
fxy[k][j][2] = z component of force/charge at grid (j,kk)
that is, convolution of electric field over particle shape,
where kk = k + noff
bxy[k][j][0] = x component of magnetic field at grid (j,kk)
bxy[k][j][1] = y component of magnetic field at grid (j,kk)
bxy[k][j][2] = z component of magnetic field at grid (j,kk)
that is, the convolution of magnetic field over particle shape,
where kk = k + noff
dcu[k][j][i] = ith component of acceleration density
at grid point j,kk for i = 0, 2
amu[k][j][i] = ith component of momentum flux
at grid point j,kk for i = 0, 3
where kk = k + noff
kpic = number of particles per tile
noff = lowermost global gridpoint in particle partition.
nyp = number of primary (complete) gridpoints in particle partition
qm = charge on particle, in units of e
qbm = particle charge/mass ratio
dt = time interval between successive calculations
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx = system length in x direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of field arrays, must be >= nx+1
nypmx = maximum size of particle partition, including guard cells.
mx1 = (system length in x direction - 1)/mx + 1
mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
local data */
#define MXV 33
#define MYV 33
int noffp, moffp, npoff, nppp, mxv3, mxv4;
int mnoff, i, j, k, nn, mm, nm, mn;
float qtmh, dti, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
float acx, acy, acz, omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y, vx, vy, vz, v1, v2, v3, v4;
float sfxy[3*MXV*MYV], sbxy[3*MXV*MYV];
float sdcu[3*MXV*MYV], samu[4*MXV*MYV];
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
/* float sdcu[3*(mx+1)*(my+1)]; */
/* float samu[4*(mx+1)*(my+1)]; */
/* mxv3/mxv4 = row strides of the 3- and 4-component local tile arrays */
mxv3 = 3*(mx + 1);
mxv4 = 4*(mx + 1);
/* qtmh = 0.5*(q/m)*dt, dti = 1/dt */
qtmh = 0.5*qbm*dt;
dti = 1.0/dt;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noffp,moffp,nppp,npoff,nn,mm,nm,mn,mnoff,x,y,vx,vy,vz, \
v1,v2,v3,v4,dxp,dyp,amx,amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt, \
omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,sfxy,sbxy, \
sdcu,samu)
for (k = 0; k < mxyp1; k++) {
/* (noffp,moffp) = lower-left grid offset of tile k within the partition */
noffp = k/mx1;
moffp = my*noffp;
noffp = mx*(k - mx1*noffp);
nppp = kpic[k];
mnoff = moffp + noff;
npoff = nppmx*k;
/* load local fields from global array */
nn = (mx < nx-noffp ? mx : nx-noffp) + 1;
mm = (my < nyp-moffp ? my : nyp-moffp) + 1;
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxy[3*i+mxv3*j] = fxy[3*(i+noffp+nxv*(j+moffp))];
sfxy[1+3*i+mxv3*j] = fxy[1+3*(i+noffp+nxv*(j+moffp))];
sfxy[2+3*i+mxv3*j] = fxy[2+3*(i+noffp+nxv*(j+moffp))];
}
}
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sbxy[3*i+mxv3*j] = bxy[3*(i+noffp+nxv*(j+moffp))];
sbxy[1+3*i+mxv3*j] = bxy[1+3*(i+noffp+nxv*(j+moffp))];
sbxy[2+3*i+mxv3*j] = bxy[2+3*(i+noffp+nxv*(j+moffp))];
}
}
/* zero out local accumulators */
for (j = 0; j < mxv3*(my+1); j++) {
sdcu[j] = 0.0f;
}
for (j = 0; j < mxv4*(my+1); j++) {
samu[j] = 0.0f;
}
/* loop over particles in tile */
for (j = 0; j < nppp; j++) {
/* find interpolation weights */
x = ppart[idimp*(j+npoff)];
y = ppart[1+idimp*(j+npoff)];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
/* nm/mn = flattened stencil base indices in the 3- and 4-wide arrays */
nm = 3*(nn - noffp) + mxv3*(mm - mnoff);
mn = 4*(nn - noffp) + mxv4*(mm - mnoff);
amx = 1.0 - dxp;
amy = 1.0 - dyp;
/* find electric field */
/* bilinear gather over the 2x2 stencil of sfxy */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + 3;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += mxv3;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + 3;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + 3;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += mxv3;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + 3;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
vx = ppart[2+idimp*(j+npoff)];
vy = ppart[3+idimp*(j+npoff)];
vz = ppart[4+idimp*(j+npoff)];
acx = vx + dx;
acy = vy + dy;
acz = vz + dz;
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0/(1.0 + omt);
omt = 0.5*(1.0 - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
/* deposit momentum flux and acceleration density */
/* fold the charge factor qm into the interpolation weights */
amx = qm*amx;
dxp = qm*dxp;
/* ox,oy,oz = time-centered velocities 0.5*(v(t+dt/2)+v(t-dt/2)) */
ox = 0.5*(dx + vx);
oy = 0.5*(dy + vy);
oz = 0.5*(dz + vz);
/* vx,vy,vz reused as dv/dt = (v(t+dt/2)-v(t-dt/2))/dt */
vx = dti*(dx - vx);
vy = dti*(dy - vy);
vz = dti*(dz - vz);
dx = amx*amy;
dy = dxp*amy;
/* flux components: v1 = oxx-oyy, v2 = oxy, v3 = ozx, v4 = ozy */
v1 = ox*ox - oy*oy;
v2 = ox*oy;
v3 = oz*ox;
v4 = oz*oy;
nn = mn;
samu[nn] += v1*dx;
samu[nn+1] += v2*dx;
samu[nn+2] += v3*dx;
samu[nn+3] += v4*dx;
dx = amx*dyp;
mm = nn + 4;
samu[mm] += v1*dy;
samu[mm+1] += v2*dy;
samu[mm+2] += v3*dy;
samu[mm+3] += v4*dy;
dy = dxp*dyp;
nn += mxv4;
samu[nn] += v1*dx;
samu[nn+1] += v2*dx;
samu[nn+2] += v3*dx;
samu[nn+3] += v4*dx;
mm = nn + 4;
samu[mm] += v1*dy;
samu[mm+1] += v2*dy;
samu[mm+2] += v3*dy;
samu[mm+3] += v4*dy;
dx = amx*amy;
dy = dxp*amy;
nn = nm;
sdcu[nn] += vx*dx;
sdcu[nn+1] += vy*dx;
sdcu[nn+2] += vz*dx;
dx = amx*dyp;
mm = nn + 3;
sdcu[mm] += vx*dy;
sdcu[mm+1] += vy*dy;
sdcu[mm+2] += vz*dy;
dy = dxp*dyp;
nn += mxv3;
sdcu[nn] += vx*dx;
sdcu[nn+1] += vy*dx;
sdcu[nn+2] += vz*dx;
mm = nn + 3;
sdcu[mm] += vx*dy;
sdcu[mm+1] += vy*dy;
sdcu[mm+2] += vz*dy;
}
/* deposit currents to interior points in global array */
/* interior points are owned by this tile alone, so no atomics needed */
nn = nxv - noffp;
mm = nypmx - moffp;
nn = mx < nn ? mx : nn;
mm = my < mm ? my : mm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
amu[4*(i+noffp+nxv*(j+moffp))] += samu[4*i+mxv4*j];
amu[1+4*(i+noffp+nxv*(j+moffp))] += samu[1+4*i+mxv4*j];
amu[2+4*(i+noffp+nxv*(j+moffp))] += samu[2+4*i+mxv4*j];
amu[3+4*(i+noffp+nxv*(j+moffp))] += samu[3+4*i+mxv4*j];
dcu[3*(i+noffp+nxv*(j+moffp))] += sdcu[3*i+mxv3*j];
dcu[1+3*(i+noffp+nxv*(j+moffp))] += sdcu[1+3*i+mxv3*j];
dcu[2+3*(i+noffp+nxv*(j+moffp))] += sdcu[2+3*i+mxv3*j];
}
}
/* deposit currents to edge points in global array */
/* edge (guard) points can be written by neighboring tiles running in */
/* other threads, so these updates must be atomic                     */
mm = nypmx - moffp;
mm = my+1 < mm ? my+1 : mm;
for (i = 1; i < nn; i++) {
#pragma omp atomic
amu[4*(i+noffp+nxv*moffp)] += samu[4*i];
#pragma omp atomic
amu[1+4*(i+noffp+nxv*moffp)] += samu[1+4*i];
#pragma omp atomic
amu[2+4*(i+noffp+nxv*moffp)] += samu[2+4*i];
#pragma omp atomic
amu[3+4*(i+noffp+nxv*moffp)] += samu[3+4*i];
#pragma omp atomic
dcu[3*(i+noffp+nxv*moffp)] += sdcu[3*i];
#pragma omp atomic
dcu[1+3*(i+noffp+nxv*moffp)] += sdcu[1+3*i];
#pragma omp atomic
dcu[2+3*(i+noffp+nxv*moffp)] += sdcu[2+3*i];
if (mm > my) {
#pragma omp atomic
amu[4*(i+noffp+nxv*(mm+moffp-1))] += samu[4*i+mxv4*(mm-1)];
#pragma omp atomic
amu[1+4*(i+noffp+nxv*(mm+moffp-1))] += samu[1+4*i+mxv4*(mm-1)];
#pragma omp atomic
amu[2+4*(i+noffp+nxv*(mm+moffp-1))] += samu[2+4*i+mxv4*(mm-1)];
#pragma omp atomic
amu[3+4*(i+noffp+nxv*(mm+moffp-1))] += samu[3+4*i+mxv4*(mm-1)];
#pragma omp atomic
dcu[3*(i+noffp+nxv*(mm+moffp-1))] += sdcu[3*i+mxv3*(mm-1)];
#pragma omp atomic
dcu[1+3*(i+noffp+nxv*(mm+moffp-1))] += sdcu[1+3*i+mxv3*(mm-1)];
#pragma omp atomic
dcu[2+3*(i+noffp+nxv*(mm+moffp-1))] += sdcu[2+3*i+mxv3*(mm-1)];
}
}
nn = nxv - noffp;
nn = mx+1 < nn ? mx+1 : nn;
for (j = 0; j < mm; j++) {
#pragma omp atomic
amu[4*(noffp+nxv*(j+moffp))] += samu[mxv4*j];
#pragma omp atomic
amu[1+4*(noffp+nxv*(j+moffp))] += samu[1+mxv4*j];
#pragma omp atomic
amu[2+4*(noffp+nxv*(j+moffp))] += samu[2+mxv4*j];
#pragma omp atomic
amu[3+4*(noffp+nxv*(j+moffp))] += samu[3+mxv4*j];
#pragma omp atomic
dcu[3*(noffp+nxv*(j+moffp))] += sdcu[mxv3*j];
#pragma omp atomic
dcu[1+3*(noffp+nxv*(j+moffp))] += sdcu[1+mxv3*j];
#pragma omp atomic
dcu[2+3*(noffp+nxv*(j+moffp))] += sdcu[2+mxv3*j];
if (nn > mx) {
#pragma omp atomic
amu[4*(nn+noffp-1+nxv*(j+moffp))] += samu[4*(nn-1)+mxv4*j];
#pragma omp atomic
amu[1+4*(nn+noffp-1+nxv*(j+moffp))] += samu[1+4*(nn-1)+mxv4*j];
#pragma omp atomic
amu[2+4*(nn+noffp-1+nxv*(j+moffp))] += samu[2+4*(nn-1)+mxv4*j];
#pragma omp atomic
amu[3+4*(nn+noffp-1+nxv*(j+moffp))] += samu[3+4*(nn-1)+mxv4*j];
#pragma omp atomic
dcu[3*(nn+noffp-1+nxv*(j+moffp))] += sdcu[3*(nn-1)+mxv3*j];
#pragma omp atomic
dcu[1+3*(nn+noffp-1+nxv*(j+moffp))] += sdcu[1+3*(nn-1)+mxv3*j];
#pragma omp atomic
dcu[2+3*(nn+noffp-1+nxv*(j+moffp))] += sdcu[2+3*(nn-1)+mxv3*j];
}
}
}
return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cppgdcjppost2l(float ppart[], float fxy[], float bxy[], float cu[],
float dcu[], float amu[], int kpic[], int noff,
int nyp, float qm, float qbm, float dt, int idimp,
int nppmx, int nx, int mx, int my, int nxv,
int nypmx, int mx1, int mxyp1) {
/* for 2-1/2d code, this subroutine calculates particle momentum flux,
acceleration density and current density using first-order spline
interpolation.
OpenMP version using guard cells, for distributed data
data deposited in tiles
particles stored segmented array
218 flops/particle, 1 divide, 69 loads, 40 stores
input: all, output: cu, dcu, amu
current density is approximated by values at the nearest grid points
cu(i,n,m)=qci*(1.-dx)*(1.-dy)
cu(i,n+1,m)=qci*dx*(1.-dy)
cu(i,n,m+1)=qci*(1.-dx)*dy
cu(i,n+1,m+1)=qci*dx*dy
and qci = qm*vj, where j = x,y,z, for i = 1, 3
where vj = .5*(vj(t+dt/2)+vj(t-dt/2))
acceleration density is approximated by values at the nearest grid
points
dcu(i,n,m)=qci*(1.-dx)*(1.-dy)
dcu(i,n+1,m)=qci*dx*(1.-dy)
dcu(i,n,m+1)=qci*(1.-dx)*dy
dcu(i,n+1,m+1)=qci*dx*dy
and qci = qm*dvj/dt, where j = x,y,z, for i = 1, 3
where dvj = (vj(t+dt/2)-vj(t-dt/2))/dt
momentum flux is approximated by values at the nearest grid points
amu(i,n,m)=qci*(1.-dx)*(1.-dy)
amu(i,n+1,m)=qci*dx*(1.-dy)
amu(i,n,m+1)=qci*(1.-dx)*dy
amu(i,n+1,m+1)=qci*dx*dy
and qci = qm*vj*vk, where jk = xx-yy,xy,zx,zy, for i = 1, 4
where vj = 0.5*(vj(t+dt/2)+vj(t-dt/2),
and vk = 0.5*(vk(t+dt/2)+vk(t-dt/2))
where n,m = nearest grid points and dx = x-n, dy = y-m
velocity equations at t=t+dt/2 are calculated from:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt)
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt)
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt)
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
omz = (q/m)*bz(x(t),y(t)).
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][n][0] = position x of particle n in partition in tile m at t
ppart[m][n][1] = position y of particle n in partition in tile m at t
ppart[m][n][2] = x velocity of particle n in partition in tile m
at t - dt/2
ppart[m][n][3] = y velocity of particle n in partition in tile m
at t - dt/2
ppart[m][n][4] = z velocity of particle n in partition in tile m
at t - dt/2
fxy[k][j][0] = x component of force/charge at grid (j,kk)
fxy[k][j][1] = y component of force/charge at grid (j,kk)
fxy[k][j][2] = z component of force/charge at grid (j,kk)
that is, convolution of electric field over particle shape,
where kk = k + noff
bxy[k][j][0] = x component of magnetic field at grid (j,kk)
bxy[k][j][1] = y component of magnetic field at grid (j,kk)
bxy[k][j][2] = z component of magnetic field at grid (j,kk)
that is, the convolution of magnetic field over particle shape,
where kk = k + noff
cu[k][j][i] = ith component of current density at grid point j,kk
at grid point j,kk for i = 0, 2
dcu[k][j][i] = ith component of acceleration density
at grid point j,kk for i = 0, 2
amu[k][j][i] = ith component of momentum flux
at grid point j,kk for i = 0, 3
where kk = k + noff
kpic = number of particles per tile
noff = lowermost global gridpoint in particle partition.
nyp = number of primary (complete) gridpoints in particle partition
qm = charge on particle, in units of e
qbm = particle charge/mass ratio
dt = time interval between successive calculations
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx = system length in x direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of field arrays, must be >= nx+1
nypmx = maximum size of particle partition, including guard cells.
mx1 = (system length in x direction - 1)/mx + 1
mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
local data */
#define MXV 33
#define MYV 33
int noffp, moffp, npoff, nppp, mxv3, mxv4;
int mnoff, i, j, k, nn, mm, nm, mn;
float qtmh, dti, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
float acx, acy, acz, omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y, vx, vy, vz, v1, v2, v3, v4;
float sfxy[3*MXV*MYV], sbxy[3*MXV*MYV];
float scu[3*MXV*MYV], sdcu[3*MXV*MYV], samu[4*MXV*MYV];
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
/* float scu[3*(mx+1)*(my+1)], sdcu[3*(mx+1)*(my+1)]; */
/* float samu[4*(mx+1)*(my+1)]; */
mxv3 = 3*(mx + 1);
mxv4 = 4*(mx + 1);
qtmh = 0.5*qbm*dt;
dti = 1.0/dt;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noffp,moffp,nppp,npoff,nn,mm,nm,mn,mnoff,x,y,vx,vy,vz, \
v1,v2,v3,v4,dxp,dyp,amx,amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt, \
omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,sfxy,sbxy, \
scu,sdcu,samu)
for (k = 0; k < mxyp1; k++) {
noffp = k/mx1;
moffp = my*noffp;
noffp = mx*(k - mx1*noffp);
nppp = kpic[k];
mnoff = moffp + noff;
npoff = nppmx*k;
/* load local fields from global array */
nn = (mx < nx-noffp ? mx : nx-noffp) + 1;
mm = (my < nyp-moffp ? my : nyp-moffp) + 1;
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxy[3*i+mxv3*j] = fxy[3*(i+noffp+nxv*(j+moffp))];
sfxy[1+3*i+mxv3*j] = fxy[1+3*(i+noffp+nxv*(j+moffp))];
sfxy[2+3*i+mxv3*j] = fxy[2+3*(i+noffp+nxv*(j+moffp))];
}
}
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sbxy[3*i+mxv3*j] = bxy[3*(i+noffp+nxv*(j+moffp))];
sbxy[1+3*i+mxv3*j] = bxy[1+3*(i+noffp+nxv*(j+moffp))];
sbxy[2+3*i+mxv3*j] = bxy[2+3*(i+noffp+nxv*(j+moffp))];
}
}
/* zero out local accumulators */
for (j = 0; j < mxv3*(my+1); j++) {
scu[j] = 0.0f;
sdcu[j] = 0.0f;
}
for (j = 0; j < mxv4*(my+1); j++) {
samu[j] = 0.0f;
}
/* loop over particles in tile */
for (j = 0; j < nppp; j++) {
/* find interpolation weights */
x = ppart[idimp*(j+npoff)];
y = ppart[1+idimp*(j+npoff)];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nm = 3*(nn - noffp) + mxv3*(mm - mnoff);
mn = 4*(nn - noffp) + mxv4*(mm - mnoff);
amx = 1.0 - dxp;
amy = 1.0 - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + 3;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += mxv3;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + 3;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + 3;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += mxv3;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + 3;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
vx = ppart[2+idimp*(j+npoff)];
vy = ppart[3+idimp*(j+npoff)];
vz = ppart[4+idimp*(j+npoff)];
acx = vx + dx;
acy = vy + dy;
acz = vz + dz;
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0/(1.0 + omt);
omt = 0.5*(1.0 - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
/* deposit momentum flux, acceleration density, and current density */
amx = qm*amx;
dxp = qm*dxp;
ox = 0.5*(dx + vx);
oy = 0.5*(dy + vy);
oz = 0.5*(dz + vz);
vx = dti*(dx - vx);
vy = dti*(dy - vy);
vz = dti*(dz - vz);
dx = amx*amy;
dy = dxp*amy;
v1 = ox*ox - oy*oy;
v2 = ox*oy;
v3 = oz*ox;
v4 = oz*oy;
nn = mn;
samu[nn] += v1*dx;
samu[nn+1] += v2*dx;
samu[nn+2] += v3*dx;
samu[nn+3] += v4*dx;
dx = amx*dyp;
mm = nn + 4;
samu[mm] += v1*dy;
samu[mm+1] += v2*dy;
samu[mm+2] += v3*dy;
samu[mm+3] += v4*dy;
dy = dxp*dyp;
nn += mxv4;
samu[nn] += v1*dx;
samu[nn+1] += v2*dx;
samu[nn+2] += v3*dx;
samu[nn+3] += v4*dx;
mm = nn + 4;
samu[mm] += v1*dy;
samu[mm+1] += v2*dy;
samu[mm+2] += v3*dy;
samu[mm+3] += v4*dy;
dx = amx*amy;
dy = dxp*amy;
nn = nm;
sdcu[nn] += vx*dx;
sdcu[nn+1] += vy*dx;
sdcu[nn+2] += vz*dx;
scu[nn] += ox*dx;
scu[nn+1] += oy*dx;
scu[nn+2] += oz*dx;
dx = amx*dyp;
mm = nn + 3;
sdcu[mm] += vx*dy;
sdcu[mm+1] += vy*dy;
sdcu[mm+2] += vz*dy;
scu[mm] += ox*dy;
scu[mm+1] += oy*dy;
scu[mm+2] += oz*dy;
dy = dxp*dyp;
nn += mxv3;
sdcu[nn] += vx*dx;
sdcu[nn+1] += vy*dx;
sdcu[nn+2] += vz*dx;
scu[nn] += ox*dx;
scu[nn+1] += oy*dx;
scu[nn+2] += oz*dx;
mm = nn + 3;
sdcu[mm] += vx*dy;
sdcu[mm+1] += vy*dy;
sdcu[mm+2] += vz*dy;
scu[mm] += ox*dy;
scu[mm+1] += oy*dy;
scu[mm+2] += oz*dy;
}
/* deposit currents to interior points in global array */
nn = nxv - noffp;
mm = nypmx - moffp;
nn = mx < nn ? mx : nn;
mm = my < mm ? my : mm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
amu[4*(i+noffp+nxv*(j+moffp))] += samu[4*i+mxv4*j];
amu[1+4*(i+noffp+nxv*(j+moffp))] += samu[1+4*i+mxv4*j];
amu[2+4*(i+noffp+nxv*(j+moffp))] += samu[2+4*i+mxv4*j];
amu[3+4*(i+noffp+nxv*(j+moffp))] += samu[3+4*i+mxv4*j];
dcu[3*(i+noffp+nxv*(j+moffp))] += sdcu[3*i+mxv3*j];
dcu[1+3*(i+noffp+nxv*(j+moffp))] += sdcu[1+3*i+mxv3*j];
dcu[2+3*(i+noffp+nxv*(j+moffp))] += sdcu[2+3*i+mxv3*j];
cu[3*(i+noffp+nxv*(j+moffp))] += scu[3*i+mxv3*j];
cu[1+3*(i+noffp+nxv*(j+moffp))] += scu[1+3*i+mxv3*j];
cu[2+3*(i+noffp+nxv*(j+moffp))] += scu[2+3*i+mxv3*j];
}
}
/* deposit currents to edge points in global array */
mm = nypmx - moffp;
mm = my+1 < mm ? my+1 : mm;
for (i = 1; i < nn; i++) {
#pragma omp atomic
amu[4*(i+noffp+nxv*moffp)] += samu[4*i];
#pragma omp atomic
amu[1+4*(i+noffp+nxv*moffp)] += samu[1+4*i];
#pragma omp atomic
amu[2+4*(i+noffp+nxv*moffp)] += samu[2+4*i];
#pragma omp atomic
amu[3+4*(i+noffp+nxv*moffp)] += samu[3+4*i];
#pragma omp atomic
dcu[3*(i+noffp+nxv*moffp)] += sdcu[3*i];
#pragma omp atomic
dcu[1+3*(i+noffp+nxv*moffp)] += sdcu[1+3*i];
#pragma omp atomic
dcu[2+3*(i+noffp+nxv*moffp)] += sdcu[2+3*i];
#pragma omp atomic
cu[3*(i+noffp+nxv*moffp)] += scu[3*i];
#pragma omp atomic
cu[1+3*(i+noffp+nxv*moffp)] += scu[1+3*i];
#pragma omp atomic
cu[2+3*(i+noffp+nxv*moffp)] += scu[2+3*i];
if (mm > my) {
#pragma omp atomic
amu[4*(i+noffp+nxv*(mm+moffp-1))] += samu[4*i+mxv4*(mm-1)];
#pragma omp atomic
amu[1+4*(i+noffp+nxv*(mm+moffp-1))] += samu[1+4*i+mxv4*(mm-1)];
#pragma omp atomic
amu[2+4*(i+noffp+nxv*(mm+moffp-1))] += samu[2+4*i+mxv4*(mm-1)];
#pragma omp atomic
amu[3+4*(i+noffp+nxv*(mm+moffp-1))] += samu[3+4*i+mxv4*(mm-1)];
#pragma omp atomic
dcu[3*(i+noffp+nxv*(mm+moffp-1))] += sdcu[3*i+mxv3*(mm-1)];
#pragma omp atomic
dcu[1+3*(i+noffp+nxv*(mm+moffp-1))] += sdcu[1+3*i+mxv3*(mm-1)];
#pragma omp atomic
dcu[2+3*(i+noffp+nxv*(mm+moffp-1))] += sdcu[2+3*i+mxv3*(mm-1)];
#pragma omp atomic
cu[3*(i+noffp+nxv*(mm+moffp-1))] += scu[3*i+mxv3*(mm-1)];
#pragma omp atomic
cu[1+3*(i+noffp+nxv*(mm+moffp-1))] += scu[1+3*i+mxv3*(mm-1)];
#pragma omp atomic
cu[2+3*(i+noffp+nxv*(mm+moffp-1))] += scu[2+3*i+mxv3*(mm-1)];
}
}
nn = nxv - noffp;
nn = mx+1 < nn ? mx+1 : nn;
for (j = 0; j < mm; j++) {
#pragma omp atomic
amu[4*(noffp+nxv*(j+moffp))] += samu[mxv4*j];
#pragma omp atomic
amu[1+4*(noffp+nxv*(j+moffp))] += samu[1+mxv4*j];
#pragma omp atomic
amu[2+4*(noffp+nxv*(j+moffp))] += samu[2+mxv4*j];
#pragma omp atomic
amu[3+4*(noffp+nxv*(j+moffp))] += samu[3+mxv4*j];
#pragma omp atomic
dcu[3*(noffp+nxv*(j+moffp))] += sdcu[mxv3*j];
#pragma omp atomic
dcu[1+3*(noffp+nxv*(j+moffp))] += sdcu[1+mxv3*j];
#pragma omp atomic
dcu[2+3*(noffp+nxv*(j+moffp))] += sdcu[2+mxv3*j];
#pragma omp atomic
cu[3*(noffp+nxv*(j+moffp))] += scu[mxv3*j];
#pragma omp atomic
cu[1+3*(noffp+nxv*(j+moffp))] += scu[1+mxv3*j];
#pragma omp atomic
cu[2+3*(noffp+nxv*(j+moffp))] += scu[2+mxv3*j];
if (nn > mx) {
#pragma omp atomic
amu[4*(nn+noffp-1+nxv*(j+moffp))] += samu[4*(nn-1)+mxv4*j];
#pragma omp atomic
amu[1+4*(nn+noffp-1+nxv*(j+moffp))] += samu[1+4*(nn-1)+mxv4*j];
#pragma omp atomic
amu[2+4*(nn+noffp-1+nxv*(j+moffp))] += samu[2+4*(nn-1)+mxv4*j];
#pragma omp atomic
amu[3+4*(nn+noffp-1+nxv*(j+moffp))] += samu[3+4*(nn-1)+mxv4*j];
#pragma omp atomic
dcu[3*(nn+noffp-1+nxv*(j+moffp))] += sdcu[3*(nn-1)+mxv3*j];
#pragma omp atomic
dcu[1+3*(nn+noffp-1+nxv*(j+moffp))] += sdcu[1+3*(nn-1)+mxv3*j];
#pragma omp atomic
dcu[2+3*(nn+noffp-1+nxv*(j+moffp))] += sdcu[2+3*(nn-1)+mxv3*j];
#pragma omp atomic
cu[3*(nn+noffp-1+nxv*(j+moffp))] += scu[3*(nn-1)+mxv3*j];
#pragma omp atomic
cu[1+3*(nn+noffp-1+nxv*(j+moffp))] += scu[1+3*(nn-1)+mxv3*j];
#pragma omp atomic
cu[2+3*(nn+noffp-1+nxv*(j+moffp))] += scu[2+3*(nn-1)+mxv3*j];
}
}
}
return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cppporder2la(float ppart[], float ppbuff[], float sbufl[],
                  float sbufr[], int kpic[], int ncl[], int ihole[],
                  int ncll[], int nclr[], int noff, int nyp, int idimp,
                  int nppmx, int nx, int ny, int mx, int my, int mx1,
                  int myp1, int npbmx, int ntmax, int nbmax, int *irc) {
/* this subroutine performs first part of a particle sort by x,y grid
   in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   for distributed data, with 1d domain decomposition in y.
   tiles are assumed to be arranged in 2D linear memory
   this part of the algorithm has 3 steps. first, one finds particles
   leaving tile and stores their number in each directon, location, and
   destination in ncl and ihole. then, a prefix scan of ncl is performed
   and departing particles are buffered in ppbuff in direction order.
   finally, we buffer particles leaving the processor in sbufl and sbufr,
   and store particle number offsets in ncll and nclr.
   input: all except ppbuff, sbufl, sbufr, ncl, ihole, ncll, nclr, irc
   output: ppart, ppbuff, sbufl, sbufr, ncl, ihole, ncll, nclr, irc
   ppart[k][n][0] = position x of particle n in tile k
   ppart[k][n][1] = position y of particle n in tile k
   ppbuff[k][n][i] = i co-ordinate of particle n in tile k
   sbufl = buffer for particles being sent to lower processor
   sbufr = buffer for particles being sent to upper processor
   kpic[k] = number of particles in tile k
   ncl(i,k) = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = direction destination of particle leaving hole
   all for tile k
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   ncll = number offset being sent to lower processor
   nclr = number offset being sent to upper processor
   noff = lowermost global gridpoint in particle partition.
   nyp = number of primary (complete) gridpoints in particle partition
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   myp1 = (partition length in y direction - 1)/my + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   nbmax = size of buffers for passing particles between processors
   irc = maximum overflow, returned only if error occurs, when irc > 0
   local data */
   int mxyp1, noffp, moffp, nppp;
   int i, j, k, ii, jj, ih, nh, ist, nn, mm, isum, ip, j1, kk;
   float anx, any, edgelx, edgely, edgerx, edgery, dx, dy;
   mxyp1 = mx1*myp1;
   anx = (float) nx;
   any = (float) ny;
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(j,k,noffp,moffp,nppp,nn,mm,ih,nh,ist,dx,dy,edgelx,edgely, \
edgerx,edgery)
   for (k = 0; k < mxyp1; k++) {
/* (noffp,moffp) = grid offset of lower-left corner of tile k */
      noffp = k/mx1;
      moffp = my*noffp;
      noffp = mx*(k - mx1*noffp);
      nppp = kpic[k];
/* nn/mm = actual tile extent in x/y (may be partial at the edge) */
      nn = nx - noffp;
      nn = mx < nn ? mx : nn;
      mm = nyp - moffp;
      mm = my < mm ? my : mm;
      ih = 0;
      nh = 0;
/* tile boundaries in global grid units (ints converted to float) */
      edgelx = noffp;
      edgerx = noffp + nn;
      edgely = noff + moffp;
      edgery = noff + moffp + mm;
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
/* loop over particles in tile */
      for (j = 0; j < nppp; j++) {
         dx = ppart[idimp*(j+nppmx*k)];
         dy = ppart[1+idimp*(j+nppmx*k)];
/* find particles going out of bounds */
         ist = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
/* encoding: 1 = -x, 2 = +x, add 3 for -y, add 6 for +y (values 1..8) */
         if (dx >= edgerx) {
            if (dx >= anx)
               ppart[idimp*(j+nppmx*k)] = dx - anx;
            ist = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0) {
               dx += anx;
               if (dx < anx)
                  ist = 1;
               else
                  dx = 0.0;
               ppart[idimp*(j+nppmx*k)] = dx;
            }
            else {
               ist = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               ppart[1+idimp*(j+nppmx*k)] = dy - any;
            ist += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  ist += 3;
               else
                  dy = 0.0;
               ppart[1+idimp*(j+nppmx*k)] = dy;
            }
            else {
               ist += 3;
            }
         }
         if (ist > 0) {
            ncl[ist+8*k-1] += 1;
            ih += 1;
/* ihole entry 0 holds the hole count, so particle holes start at 1 */
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = ist;
            }
            else {
               nh = 1;
            }
         }
      }
/* set error and end of file flag */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
/* ihole overflow */
   if (*irc > 0)
      return;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,isum,ist,nh,ip,j1,ii)
   for (k = 0; k < mxyp1; k++) {
/* find address offset for ordered ppbuff array */
/* converts ncl counts into exclusive prefix offsets within tile k */
      isum = 0;
      for (j = 0; j < 8; j++) {
         ist = ncl[j+8*k];
         ncl[j+8*k] = isum;
         isum += ist;
      }
      nh = ihole[2*(ntmax+1)*k];
      ip = 0;
/* loop over particles leaving tile */
      for (j = 0; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
         ist = ihole[1+2*(j+1+(ntmax+1)*k)];
         ii = ncl[ist+8*k-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[i+idimp*(ii+npbmx*k)]
               = ppart[i+idimp*(j1+nppmx*k)];
            }
         }
         else {
            ip = 1;
         }
         ncl[ist+8*k-1] = ii + 1;
      }
/* set error */
      if (ip > 0)
         *irc = ncl[7+8*k];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* buffer particles and their number leaving the node: */
/* update sbufl, sbufr, ncll, nclr */
/* kk = tile index of the first tile in the last (uppermost) row */
   kk = mx1*(myp1 - 1);
#pragma omp parallel for private(k)
   for (k = 0; k < mx1; k++) {
      ncll[3*k] = ncl[4+8*k] - ncl[1+8*k];
      nclr[3*k] = ncl[7+8*(k+kk)] - ncl[4+8*(k+kk)];
   }
/* perform prefix scan */
/* log2(mx1)-pass parallel scan of ncll/nclr over the tiles in x */
   kk = 1;
L90: if (kk >= mx1)
      goto L110;
#pragma omp parallel for private(k,ii,nn,mm)
   for (k = 0; k < mx1; k++) {
      ii = k/kk;
      nn = kk*ii;
      mm = 2*nn + kk - 1;
      nn += k + kk;
      if (nn < mx1) {
         ncll[3*nn] += ncll[3*mm];
         nclr[3*nn] += nclr[3*mm];
      }
   }
   kk += kk;
   goto L90;
L110: kk = mx1*(myp1 - 1);
#pragma omp parallel for private(i,j,k,ii,nn,mm)
   for (k = 0; k < mx1; k++) {
/* nn/mm = starting offsets of tile k's particles in sbufl/sbufr */
      ii = ncl[4+8*k] - ncl[1+8*k];
      nn = ncll[3*k] - ii;
      jj = nbmax - nn;
      jj = ii < jj ? ii : jj;
      for (j = 0; j < jj; j++) {
         for (i = 0; i < idimp; i++) {
            sbufl[i+idimp*(j+nn)]
            = ppbuff[i+idimp*(j+ncl[1+8*k]+npbmx*k)];
         }
      }
      for (i = 0; i < 3; i++) {
         ncll[i+3*k] = ncl[i+2+8*k] - ncl[1+8*k] + nn;
      }
      ii = ncl[7+8*(k+kk)] - ncl[4+8*(k+kk)];
      mm = nclr[3*k] - ii;
      jj = nbmax - mm;
      jj = ii < jj ? ii : jj;
      for (j = 0; j < jj; j++) {
         for (i = 0; i < idimp; i++) {
            sbufr[i+idimp*(j+mm)]
            = ppbuff[i+idimp*(j+ncl[4+8*(k+kk)]+npbmx*(k+kk))];
         }
      }
      for (i = 0; i < 3; i++) {
         nclr[i+3*k] = ncl[i+5+8*(k+kk)] - ncl[4+8*(k+kk)] + mm;
      }
   }
/* sbufl or sbufr overflow */
   nn = ncll[3*mx1-1];
   mm = nclr[3*mx1-1];
   ii = nn > mm ? nn : mm;
   if (ii > nbmax)
      *irc = ii;
   return;
}
/*--------------------------------------------------------------------*/
void cppporderf2la(float ppart[], float ppbuff[], float sbufl[],
                   float sbufr[], int ncl[], int ihole[], int ncll[],
                   int nclr[], int idimp, int nppmx, int mx1, int myp1,
                   int npbmx, int ntmax, int nbmax, int *irc) {
/* this subroutine performs first part of a particle sort by x,y grid
   in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   for distributed data, with 1d domain decomposition in y.
   tiles are assumed to be arranged in 2D linear memory
   this part of the algorithm has 2 steps. first, a prefix scan of ncl
   is performed and departing particles are buffered in ppbuff in
   direction order. then, we buffer particles leaving the processor in
   sbufl and sbufr, and store particle number offsets in ncll and nclr.
   it assumes that the number, location, and destination of particles
   leaving a tile have been previously stored in ncl and ihole by the
   cppgppushf2l procedure.
   input: all except ppbuff, sbufl, sbufr, ncll, nclr, irc
   output: ppart, ppbuff, sbufl, sbufr, ncl, ncll, nclr, irc
   ppart[k][n][0] = position x of particle n in tile k
   ppart[k][n][1] = position y of particle n in tile k
   ppbuff[k][n][i] = i co-ordinate of particle n in tile k
   sbufl = buffer for particles being sent to lower processor
   sbufr = buffer for particles being sent to upper processor
   ncl(i,k) = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = direction destination of particle leaving hole
   all for tile k
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   ncll = number offset being sent to lower processor
   nclr = number offset being sent to upper processor
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   mx1 = (system length in x direction - 1)/mx + 1
   myp1 = (partition length in y direction - 1)/my + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   nbmax = size of buffers for passing particles between processors
   irc = maximum overflow, returned only if error occurs, when irc > 0
   local data */
   int mxyp1;
   int i, j, k, ii, jj, nh, ist, nn, mm, isum, ip, j1, kk;
   mxyp1 = mx1*myp1;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,isum,ist,nh,ip,j1,ii)
   for (k = 0; k < mxyp1; k++) {
/* find address offset for ordered ppbuff array */
/* converts ncl counts into exclusive prefix offsets within tile k */
      isum = 0;
      for (j = 0; j < 8; j++) {
         ist = ncl[j+8*k];
         ncl[j+8*k] = isum;
         isum += ist;
      }
      nh = ihole[2*(ntmax+1)*k];
      ip = 0;
/* loop over particles leaving tile */
      for (j = 0; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
         ist = ihole[1+2*(j+1+(ntmax+1)*k)];
         ii = ncl[ist+8*k-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[i+idimp*(ii+npbmx*k)]
               = ppart[i+idimp*(j1+nppmx*k)];
            }
         }
         else {
            ip = 1;
         }
         ncl[ist+8*k-1] = ii + 1;
      }
/* set error */
      if (ip > 0)
         *irc = ncl[7+8*k];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* buffer particles and their number leaving the node: */
/* update sbufl, sbufr, ncll, nclr */
/* kk = tile index of the first tile in the last (uppermost) row */
   kk = mx1*(myp1 - 1);
#pragma omp parallel for private(k)
   for (k = 0; k < mx1; k++) {
      ncll[3*k] = ncl[4+8*k] - ncl[1+8*k];
      nclr[3*k] = ncl[7+8*(k+kk)] - ncl[4+8*(k+kk)];
   }
/* perform prefix scan */
/* log2(mx1)-pass parallel scan of ncll/nclr over the tiles in x */
   kk = 1;
L90: if (kk >= mx1)
      goto L110;
#pragma omp parallel for private(k,ii,nn,mm)
   for (k = 0; k < mx1; k++) {
      ii = k/kk;
      nn = kk*ii;
      mm = 2*nn + kk - 1;
      nn += k + kk;
      if (nn < mx1) {
         ncll[3*nn] += ncll[3*mm];
         nclr[3*nn] += nclr[3*mm];
      }
   }
   kk += kk;
   goto L90;
L110: kk = mx1*(myp1 - 1);
#pragma omp parallel for private(i,j,k,ii,nn,mm)
   for (k = 0; k < mx1; k++) {
/* nn/mm = starting offsets of tile k's particles in sbufl/sbufr */
      ii = ncl[4+8*k] - ncl[1+8*k];
      nn = ncll[3*k] - ii;
      jj = nbmax - nn;
      jj = ii < jj ? ii : jj;
      for (j = 0; j < jj; j++) {
         for (i = 0; i < idimp; i++) {
            sbufl[i+idimp*(j+nn)]
            = ppbuff[i+idimp*(j+ncl[1+8*k]+npbmx*k)];
         }
      }
      for (i = 0; i < 3; i++) {
         ncll[i+3*k] = ncl[i+2+8*k] - ncl[1+8*k] + nn;
      }
      ii = ncl[7+8*(k+kk)] - ncl[4+8*(k+kk)];
      mm = nclr[3*k] - ii;
      jj = nbmax - mm;
      jj = ii < jj ? ii : jj;
      for (j = 0; j < jj; j++) {
         for (i = 0; i < idimp; i++) {
            sbufr[i+idimp*(j+mm)]
            = ppbuff[i+idimp*(j+ncl[4+8*(k+kk)]+npbmx*(k+kk))];
         }
      }
      for (i = 0; i < 3; i++) {
         nclr[i+3*k] = ncl[i+5+8*(k+kk)] - ncl[4+8*(k+kk)] + mm;
      }
   }
/* sbufl or sbufr overflow */
   nn = ncll[3*mx1-1];
   mm = nclr[3*mx1-1];
   ii = nn > mm ? nn : mm;
   if (ii > nbmax)
      *irc = ii;
   return;
}
/*--------------------------------------------------------------------*/
void cppporder2lb(float ppart[], float ppbuff[], float rbufl[],
                  float rbufr[], int kpic[], int ncl[], int ihole[],
                  int mcll[], int mclr[], int idimp, int nppmx, int mx1,
                  int myp1, int npbmx, int ntmax, int nbmax, int *irc) {
/* this subroutine performs second part of a particle sort by x,y grid
   in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   for distributed data, with 1d domain decomposition in y.
   tiles are assumed to be arranged in 2D linear memory
   incoming particles from other tiles are copied from ppbuff, rbufl, and
   rbufr into ppart
   input: all except ppart, kpic, irc
   output: ppart, kpic, irc
   ppart[k][n][0] = position x of particle n in tile k
   ppart[k][n][1] = position y of particle n in tile k
   ppbuff[k][n][i] = i co-ordinate of particle n in tile k
   rbufl = buffer for particles being received from lower processor
   rbufr = buffer for particles being received from upper processor
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = direction destination of particle leaving hole
   all for tile k
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   mcll = number offset being received from lower processor
   mclr = number offset being received from upper processor
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   mx1 = (system length in x direction - 1)/mx + 1
   myp1 = (partition length in y direction - 1)/my + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   nbmax = size of buffers for passing particles between processors
   irc = maximum overflow, returned only if error occurs, when irc > 0
   local data */
   int mxyp1, nppp, ncoff, noff, moff;
   int i, j, k, ii, kx, ky, ih, nh, ist;
   int ip, j1, j2, kxl, kxr, kk, kl, kr;
   int ks[8];
   mxyp1 = mx1*myp1;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,ii,kk,nppp,kx,ky,kl,kr,kxl,kxr,ih,nh,ncoff,noff,moff, \
ist,j1,j2,ip,ks)
   for (k = 0; k < mxyp1; k++) {
      nppp = kpic[k];
      ky = k/mx1;
/* loop over tiles in y */
      kk = ky*mx1;
/* find tile above */
      kl = (ky - 1)*mx1;
/* find tile below */
      kr = (ky + 1)*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
/* ks order: (x+1,y), (x-1,y), (x,y+1), (x+1,y+1), (x-1,y+1), */
/*           (x,y-1), (x+1,y-1), (x-1,y-1), periodic in x only */
      ks[0] = kxr + kk;
      ks[1] = kxl + kk;
      ks[2] = kx + kr;
      ks[3] = kxr + kr;
      ks[4] = kxl + kr;
      ks[5] = kx + kl;
      ks[6] = kxr + kl;
      ks[7] = kxl + kl;
/* loop over directions */
      nh = ihole[2*(ntmax+1)*k];
      noff = 0;
      moff = 0;
      if (ky==0) {
         if (kx > 0)
            noff = mcll[2+3*(kx-1)];
      }
      if (ky==(myp1-1)) {
         if (kx > 0)
            moff = mclr[2+3*(kx-1)];
      }
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      for (ii = 0; ii < 8; ii++) {
/* ip = number of particles coming from direction ii */
/* ks[ii] < 0: source row is off the bottom, read from rbufl; */
/* ks[ii] >= mxyp1: off the top, read from rbufr; else from ppbuff */
         if (ks[ii] < 0) {
            if (ii > 5)
               noff = mcll[ii-6+3*(ks[ii]+mx1)];
            ip = mcll[ii-5+3*(ks[ii]+mx1)] - noff;
         }
         else if (ks[ii] >= mxyp1) {
            if (ii > 2)
               moff = mclr[ii-3+3*(ks[ii]-mxyp1)];
            ip = mclr[ii-2+3*(ks[ii]-mxyp1)] - moff;
         }
         else {
            if (ii > 0)
               ncoff = ncl[ii-1+8*ks[ii]];
            ip = ncl[ii+8*ks[ii]] - ncoff;
         }
         for (j = 0; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = nppp;
               nppp += 1;
            }
            if (j1 < nppmx) {
               if (ks[ii] < 0) {
                  for (i = 0; i < idimp; i++) {
                     ppart[i+idimp*(j1+nppmx*k)]
                     = rbufl[i+idimp*(j+noff)];
                  }
               }
               else if (ks[ii] >= mxyp1) {
                  for (i = 0; i < idimp; i++) {
                     ppart[i+idimp*(j1+nppmx*k)]
                     = rbufr[i+idimp*(j+moff)];
                  }
               }
               else {
                  for (i = 0; i < idimp; i++) {
                     ppart[i+idimp*(j1+nppmx*k)]
                     = ppbuff[i+idimp*(j+ncoff+npbmx*ks[ii])];
                  }
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
      if (ih < nh) {
         ip = nh - ih;
         for (j = 0; j < ip; j++) {
/* holes were recorded in ascending order, so walk them backwards */
            j1 = nppp - j - 1;
            j2 = ihole[2*(nh-j+(ntmax+1)*k)] - 1;
            if (j1 > j2) {
/* move particle only if it is below current hole */
               for (i = 0; i < idimp; i++) {
                  ppart[i+idimp*(j2+nppmx*k)]
                  = ppart[i+idimp*(j1+nppmx*k)];
               }
            }
         }
         nppp -= ip;
      }
      kpic[k] = nppp;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppcguard2xl(float fxy[], int myp, int nx, int ndim, int nxe,
                  int nypmx) {
/* replicate extended periodic vector field in x direction:
   copy column 0 of each row into guard column nx, component by
   component; linear interpolation, for distributed data
   fxy = vector field, dimensioned (ndim,nxe,nypmx)
   myp = number of full or partial grids in particle partition
   nx = system length in x direction
   ndim = leading dimension of array fxy
   nxe = first dimension of field arrays, must be >= nx+1
   nypmx = maximum size of particle partition, including guard cells
   local data */
   int n, row, base, nrows;
/* one extra row covers the y guard cell */
   nrows = myp + 1;
   for (row = 0; row < nrows; row++) {
      base = ndim*nxe*row;
      for (n = 0; n < ndim; n++) {
         fxy[base+ndim*nx+n] = fxy[base+n];
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppaguard2xl(float q[], int myp, int nx, int nxe, int nypmx) {
/* accumulate extended periodic scalar field in x direction:
   fold the guard column nx of each row back onto column 0 and zero it;
   linear interpolation, for distributed data
   q = scalar field, dimensioned (nxe,nypmx)
   myp = number of full or partial grids in particle partition
   nx = system length in x direction
   nxe = first dimension of field arrays, must be >= nx+1
   nypmx = maximum size of particle partition, including guard cells
   local data */
   int row, off, nrows;
/* one extra row covers the y guard cell */
   nrows = myp + 1;
   for (row = 0; row < nrows; row++) {
      off = nxe*row;
      q[off] += q[off+nx];
      q[off+nx] = 0.0;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppacguard2xl(float cu[], int myp, int nx, int ndim, int nxe,
                   int nypmx) {
/* accumulate extended periodic vector field in x direction:
   fold guard column nx of each row back onto column 0, component by
   component, then zero the guard column;
   linear interpolation, for distributed data
   cu = vector field, dimensioned (ndim,nxe,nypmx)
   myp = number of full or partial grids in particle partition
   nx = system length in x direction
   ndim = leading dimension of array cu
   nxe = first dimension of field arrays, must be >= nx+1
   nypmx = maximum size of particle partition, including guard cells
   local data */
   int n, row, base, nrows;
/* one extra row covers the y guard cell */
   nrows = myp + 1;
   for (row = 0; row < nrows; row++) {
      base = ndim*nxe*row;
      for (n = 0; n < ndim; n++) {
         cu[base+n] += cu[base+ndim*nx+n];
         cu[base+ndim*nx+n] = 0.0;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppascfguard2l(float dcu[], float cus[], int nyp, float q2m0,
                    int nx, int nxe, int nypmx) {
/* add scaled field to extended periodic field:
   dcu -= q2m0*cus at every interior grid point, 3 components each
   linear interpolation, for distributed data
   dcu/cus = vector fields, dimensioned (3,nxe,nypmx)
   nyp = number of primary (complete) gridpoints in particle partition
   q2m0 = wp0/affp, where
   wp0 = normalized total plasma frequency squared
   affp = normalization constant = nx*ny/np, where np=number of particles
   nx = system length in x direction
   nxe = first dimension of field arrays, must be >= nx+1
   nypmx = maximum size of particle partition, including guard cells
   local data */
   int c, ix, iy, base;
#pragma omp parallel for private(c,ix,iy,base)
   for (iy = 0; iy < nyp; iy++) {
      for (ix = 0; ix < nx; ix++) {
         base = 3*(ix + nxe*iy);
         for (c = 0; c < 3; c++) {
            dcu[base+c] -= q2m0*cus[base+c];
         }
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppfwpminmx2(float qe[], int nyp, float qbme, float *wpmax,
                  float *wpmin, int nx, int nxe, int nypmx) {
/* calculates maximum and minimum plasma frequency. assumes guard cells
   have already been added
   qe = charge density for electrons
   nyp = number of primary gridpoints in particle partition
   qbme = charge/mass ratio for electrons
   wpmax/wpmin = maximum/minimum plasma frequency
   nx = system length in x direction
   nxe = first dimension of field array, must be >= nx
   nypmx = maximum size of particle partition, including guard cells.
   note: the original version left at1 shared across threads (a data
   race) and serialized every comparison through unnamed critical
   sections; at1 is now private and the max/min are combined with
   OpenMP reduction clauses (OpenMP 3.1+), which also folds in the
   initial tpmax/tpmin seeded from qe[0]
   local data */
   int j, k;
   float tpmax, tpmin, at1;
   tpmax = qbme*qe[0];
   tpmin = tpmax;
#pragma omp parallel for private(j,k,at1) \
reduction(max:tpmax) reduction(min:tpmin)
   for (k = 0; k < nyp; k++) {
      for (j = 0; j < nx; j++) {
         at1 = qbme*qe[j+nxe*k];
         tpmax = at1 > tpmax ? at1 : tpmax;
         tpmin = at1 < tpmin ? at1 : tpmin;
      }
   }
   *wpmax = tpmax;
   *wpmin = tpmin;
   return;
}
/*--------------------------------------------------------------------*/
void cmppois23(float complex q[], float complex fxy[], int isign,
               float complex ffc[], float ax, float ay, float affp,
               float *we, int nx, int ny, int kstrt, int nyv, int kxp,
               int nyhd) {
/* this subroutine solves 2d poisson's equation in fourier space for
   force/charge (or convolution of electric field over particle shape)
   with periodic boundary conditions. Zeros out z component.
   for distributed data.
   for isign = 0, input: isign,ax,ay,affp,nx,ny,kstrt,nyv,kxp,nyhd,
   output: ffc
   for isign /= 0, input: q,ffc,isign,nx,ny,kstrt,nyv,kxp,nyhd,
   output: fxy,we
   approximate flop count is: 33*nxc*nyc + 15*(nxc + nyc)
   where nxc = (nx/2-1)/nvp, nyc = ny/2 - 1, and nvp = number of procs
   the equation used is:
   fx[ky][kx] = -sqrt(-1)*kx*g(kx,ky)*s(kx,ky)*q(kx,ky),
   fy[ky][kx] = -sqrt(-1)*ky*g(kx,ky)*s(kx,ky)*q(kx,ky),
   fz[ky][kx] = zero,
   where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
   g[ky][kx] = (affp/(kx**2+ky**2))*s(kx,ky),
   s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for
   fx(kx=pi) = fy(kx=pi) = fx(ky=pi) = fy(ky=pi) = 0, and
   fx(kx=0,ky=0) = fy(kx=0,ky=0) = 0.
   q[k][j] = complex charge density for fourier mode (jj-1,k-1)
   fxy[k][j][0] = x component of complex force/charge,
   fxy[k][j][1] = y component of complex force/charge,
   fxy[k][j][2] = zero,
   for fourier mode (jj-1,k-1), where jj = j + kxp*(kstrt - 1)
   kxp = number of data values per block
   kstrt = starting data block number
   if isign = 0, form factor array is prepared
   if isign is not equal to 0, force/charge is calculated.
   aimag(ffc[k][j]) = finite-size particle shape factor s
   real(ffc[k][j])) = potential green's function g
   for fourier mode (jj-1,k-1), where jj = j + kxp*(kstrt - 1)
   ax/ay = half-width of particle in x/y direction
   affp = normalization constant = nx*ny/np, where np=number of particles
   electric field energy is also calculated, using
   we = nx*ny*sum((affp/(kx**2+ky**2))*|q(kx,ky)*s(kx,ky)|**2)
   nx/ny = system length in x/y direction
   nyv = first dimension of field arrays, must be >= ny
   nyhd = first dimension of form factor array, must be >= nyh
   local data */
   int nxh, nyh, ks, joff, kxps, j, jj, jk, jk3, k, k1;
   float dnx, dny, dkx, dky, at1, at2, at3, at4;
   float complex zero, zt1, zt2;
   double wp, sum1;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
/* joff = global kx-mode offset of this processor's block */
   ks = kstrt - 1;
   joff = kxp*ks;
/* kxps = number of kx modes handled locally, clipped to [0,kxp] */
   kxps = nxh - joff;
   kxps = 0 > kxps ? 0 : kxps;
   kxps = kxp < kxps ? kxp : kxps;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   zero = 0.0 + 0.0*_Complex_I;
   if (isign != 0)
      goto L30;
   if (kstrt > nxh) return;
/* prepare form factor array */
/* ffc[k][j] packs green's function (real) and shape factor (imag) */
   for (j = 0; j < kxps; j++) {
      dkx = dnx*(float) (j + joff);
      jj = nyhd*j;
      at1 = dkx*dkx;
      at2 = pow((dkx*ax),2);
      for (k = 0; k < nyh; k++) {
         dky = dny*(float) k;
         at3 = dky*dky + at1;
         at4 = exp(-.5*(pow((dky*ay),2) + at2));
         if (at3==0.0) {
            ffc[k+jj] = affp + 1.0*_Complex_I;
         }
         else {
            ffc[k+jj] = (affp*at4/at3) + at4*_Complex_I;
         }
      }
   }
   return;
/* calculate force/charge and sum field energy */
L30: sum1 = 0.0;
   if (kstrt > nxh)
      goto L70;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for \
private(j,k,k1,jj,jk,jk3,dkx,at1,at2,at3,zt1,zt2,wp) \
reduction(+:sum1)
   for (j = 0; j < kxps; j++) {
      dkx = dnx*(float) (j + joff);
/* jj/jk/jk3 = row strides into ffc, q, and fxy for local mode j */
      jj = nyhd*j;
      jk = nyv*j;
      jk3 = 3*jk;
      wp = 0.0;
      if ((j+joff) > 0) {
         for (k = 1; k < nyh; k++) {
/* k1 = conjugate (negative ky) partner of mode k */
            k1 = ny - k;
            at1 = crealf(ffc[k+jj])*cimagf(ffc[k+jj]);
            at2 = dkx*at1;
            at3 = dny*at1*(float) k;
/* zt1 = -sqrt(-1)*q[k][j], zt2 = -sqrt(-1)*q[k1][j] */
            zt1 = cimagf(q[k+jk]) - crealf(q[k+jk])*_Complex_I;
            zt2 = cimagf(q[k1+jk]) - crealf(q[k1+jk])*_Complex_I;
            fxy[3*k+jk3] = at2*zt1;
            fxy[1+3*k+jk3] = at3*zt1;
            fxy[2+3*k+jk3] = zero;
            fxy[3*k1+jk3] = at2*zt2;
            fxy[1+3*k1+jk3] = -at3*zt2;
            fxy[2+3*k1+jk3] = zero;
            wp += at1*(q[k+jk]*conjf(q[k+jk])
               + q[k1+jk]*conjf(q[k1+jk]));
         }
/* mode numbers ky = 0, ny/2 */
         k1 = nyh;
         at1 = crealf(ffc[jj])*cimagf(ffc[jj]);
         at3 = dkx*at1;
         zt1 = cimagf(q[jk]) - crealf(q[jk])*_Complex_I;
         fxy[jk3] = at3*zt1;
         fxy[1+jk3] = zero;
         fxy[2+jk3] = zero;
         fxy[3*k1+jk3] = zero;
         fxy[1+3*k1+jk3] = zero;
         fxy[2+3*k1+jk3] = zero;
         wp += at1*(q[jk]*conjf(q[jk]));
      }
      sum1 += wp;
   }
   wp = 0.0;
/* mode numbers kx = 0, nx/2 (only present on the first block) */
   if (ks==0) {
      for (k = 1; k < nyh; k++) {
         k1 = ny - k;
         at1 = crealf(ffc[k])*cimagf(ffc[k]);
         at2 = dny*at1*(float) k;
         zt1 = cimagf(q[k]) - crealf(q[k])*_Complex_I;
         fxy[3*k] = zero;
         fxy[1+3*k] = at2*zt1;
         fxy[2+3*k] = zero;
         fxy[3*k1] = zero;
         fxy[1+3*k1] = zero;
         fxy[2+3*k1] = zero;
         wp += at1*(q[k]*conjf(q[k]));
      }
      k1 = 3*nyh;
      fxy[0] = zero;
      fxy[1] = zero;
      fxy[2] = zero;
      fxy[k1] = zero;
      fxy[1+k1] = zero;
      fxy[2+k1] = zero;
   }
   sum1 += wp;
L70:
   *we = sum1*((float) nx)*((float) ny);
   return;
}
/*--------------------------------------------------------------------*/
void cmppcuperp2(float complex cu[], int nx, int ny, int kstrt, int nyv,
                 int kxp) {
/* this subroutine calculates the transverse current in fourier space
   input: all, output: cu
   approximate flop count is: 36*nxc*nyc
   and nxc*nyc divides
   where nxc = (nx/2-1)/nvp, nyc = ny/2 - 1, and nvp = number of procs
   the transverse current is calculated using the equation:
   cux[ky][kx] = cux(kx,ky)-kx*(kx*cux(kx,ky)+ky*cuy(kx,ky))/(kx*kx+ky*ky)
   cuy[ky][kx] = cuy(kx,ky)-ky*(kx*cux(kx,ky)+ky*cuy(kx,ky))/(kx*kx+ky*ky)
   where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
   except for cux(kx=pi) = cuy(kx=pi) = 0, cux(ky=pi) = cuy(ky=pi) = 0,
   and cux(kx=0,ky=0) = cuy(kx=0,ky=0) = 0.
   cu[j][k][i] = i-th component of complex current density and
   for fourier mode (jj-1,k-1), where jj = j + kxp*(kstrt - 1)
   nx/ny = system length in x/y direction
   kstrt = starting data block number
   nyv = first dimension of field arrays, must be >= ny
   kxp = number of data values per block
   local data */
   int nxh, nyh, ks, joff, kxps, j, jk3, k, k1;
   float dnx, dny, dkx, dky, dkx2, at1;
   float complex zero, zt1;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
/* joff = global kx-mode offset of this processor's block */
   ks = kstrt - 1;
   joff = kxp*ks;
/* kxps = number of kx modes handled locally, clipped to [0,kxp] */
   kxps = nxh - joff;
   kxps = 0 > kxps ? 0 : kxps;
   kxps = kxp < kxps ? kxp : kxps;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   zero = 0.0 + 0.0*_Complex_I;
/* calculate transverse part of current */
   if (kstrt > nxh)
      return;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for private(j,k,k1,jk3,dkx,dkx2,dky,at1,zt1)
   for (j = 0; j < kxps; j++) {
      dkx = dnx*(float) (j + joff);
      dkx2 = dkx*dkx;
      jk3 = 3*nyv*j;
      if ((j+joff) > 0) {
         for (k = 1; k < nyh; k++) {
/* k1 = conjugate (negative ky) partner of mode k */
            k1 = ny - k;
            dky = dny*(float) k;
/* at1 = 1/(kx*kx+ky*ky); zt1 = longitudinal projection k.cu/|k|^2 */
            at1 = 1.0/(dky*dky + dkx2);
            zt1 = at1*(dkx*cu[3*k+jk3] + dky*cu[1+3*k+jk3]);
            cu[3*k+jk3] -= dkx*zt1;
            cu[1+3*k+jk3] -= dky*zt1;
/* negative-ky partner: ky enters with opposite sign */
            zt1 = at1*(dkx*cu[3*k1+jk3] - dky*cu[1+3*k1+jk3]);
            cu[3*k1+jk3] -= dkx*zt1;
            cu[1+3*k1+jk3] += dky*zt1;
         }
/* mode numbers ky = 0, ny/2 */
         k1 = nyh;
         cu[jk3] = zero;
         cu[3*k1+jk3] = zero;
         cu[1+3*k1+jk3] = zero;
      }
   }
/* mode numbers kx = 0, nx/2 (only present on the first block) */
   if (ks==0) {
      for (k = 1; k < nyh; k++) {
         k1 = ny - k;
         cu[1+3*k] = zero;
         cu[3*k1] = zero;
         cu[1+3*k1] = zero;
      }
      k1 = 3*nyh;
      cu[0] = zero;
      cu[1] = zero;
      cu[k1] = zero;
      cu[1+k1] = zero;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cmppbbpoisp23(float complex cu[], float complex bxy[],
float complex ffc[], float ci, float *wm, int nx,
int ny, int kstrt, int nyv, int kxp, int nyhd) {
/* this subroutine solves 2-1/2d poisson's equation in fourier space for
magnetic field (or convolution of magnetic field over particle shape)
with periodic boundary conditions for distributed data.
input: cu,ffc,ci,nx,ny,kstrt,nyv,kxp,nyhd, output: bxy,wm
approximate flop count is: 85*nxc*nyc + 36*(nxc + nyc)
where nxc = (nx/2-1)/nvp, nyc = ny/2 - 1, and nvp = number of procs
magnetic field is calculated using the equations:
bx[ky][kx] = ci*ci*sqrt(-1)*g[ky][kx]*ky*cuz[ky][kx]*s[ky][kx],
by[ky][kx] = -ci*ci*sqrt(-1)*g[ky][kx]*kx*cuz[ky][kx]*s[ky][kx],
bz[ky][kx] = ci*ci*sqrt(-1)*g[ky][kx]*(kx*cuy[ky][kx]-ky*cux[ky][kx])*
s[ky][kx],
where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
g[ky][kx] = (affp/(kx**2+ky**2))*s[ky][kx],
s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for
bx(kx=pi) = by(kx=pi) = bz(kx=pi) = 0,
bx(ky=pi) = by(ky=pi) = bz(ky=pi) = 0,
bx(kx=0,ky=0) = by(kx=0,ky=0) = bz(kx=0,ky=0) = 0.
cu[j][k][i] = i-th component of complex current density and
bxy[j][k][i] = i-th component of complex magnetic field,
for fourier mode (jj,k), where jj = j + kxp*(kstrt - 1)
kxp = number of data values per block
kstrt = starting data block number
imag(ffc[j][k]) = finite-size particle shape factor s
real(ffc[j][k]) = potential green's function g
for fourier mode (jj,k), where jj = j + kxp*(kstrt - 1)
ci = reciprocal of velocity of light
magnetic field energy is also calculated, using
wm = nx*ny*sum((affp/(kx**2+ky**2))*ci*ci
|cu[ky][kx]*s[ky][kx]|**2), where
affp = normalization constant = nx*ny/np, where np=number of particles
this expression is valid only if the current is divergence-free
nx/ny = system length in x/y direction
nyv = second dimension of field arrays, must be >= ny
nyhd = first dimension of form factor array, must be >= nyh
local data */
int nxh, nyh, ks, joff, kxps, j, jj, jk, k, k1;
float ci2, dnx, dny, dkx, dky, at1, at2, at3;
float complex zero, zt1, zt2, zt3;
double wp, sum1;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
ks = kstrt - 1;
joff = kxp*ks;
kxps = nxh - joff;
kxps = 0 > kxps ? 0 : kxps;
kxps = kxp < kxps ? kxp : kxps;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
zero = 0.0 + 0.0*_Complex_I;
ci2 = ci*ci;
/* calculate magnetic field and sum field energy */
sum1 = 0.0;
if (kstrt > nxh)
goto L40;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for \
private(j,k,k1,jj,jk,dkx,dky,at1,at2,at3,zt1,zt2,zt3,wp) \
reduction(+:sum1)
for (j = 0; j < kxps; j++) {
dkx = dnx*(float) (j + joff);
jj = nyhd*j;
jk = nyv*j;
wp = 0.0;
if ((j+joff) > 0) {
for (k = 1; k < nyh; k++) {
k1 = ny - k;
dky = dny*(float) k;
at1 = ci2*crealf(ffc[k+jj])*cimagf(ffc[k+jj]);
at2 = dky*at1;
at3 = dkx*at1;
zt1 = -cimagf(cu[2+3*k+3*jk])
+ crealf(cu[2+3*k+3*jk])*_Complex_I;
zt2 = -cimagf(cu[1+3*k+3*jk])
+ crealf(cu[1+3*k+3*jk])*_Complex_I;
zt3 = -cimagf(cu[3*k+3*jk])
+ crealf(cu[3*k+3*jk])*_Complex_I;
bxy[3*k+3*jk] = at2*zt1;
bxy[1+3*k+3*jk] = -at3*zt1;
bxy[2+3*k+3*jk] = at3*zt2 - at2*zt3;
zt1 = -cimagf(cu[2+3*k1+3*jk])
+ crealf(cu[2+3*k1+3*jk])*_Complex_I;
zt2 = -cimagf(cu[1+3*k1+3*jk])
+ crealf(cu[1+3*k1+3*jk])*_Complex_I;
zt3 = -cimagf(cu[3*k1+3*jk])
+ crealf(cu[3*k1+3*jk])*_Complex_I;
bxy[3*k1+3*jk] = -at2*zt1;
bxy[1+3*k1+3*jk] = -at3*zt1;
bxy[2+3*k1+3*jk] = at3*zt2 + at2*zt3;
wp += at1*(cu[3*k+3*jk]*conjf(cu[3*k+3*jk])
+ cu[1+3*k+3*jk]*conjf(cu[1+3*k+3*jk])
+ cu[2+3*k+3*jk]*conjf(cu[2+3*k+3*jk])
+ cu[3*k1+3*jk]*conjf(cu[3*k1+3*jk])
+ cu[1+3*k1+3*jk]*conjf(cu[1+3*k1+3*jk])
+ cu[2+3*k1+3*jk]*conjf(cu[2+3*k1+3*jk]));
}
/* mode numbers ky = 0, ny/2 */
k1 = nyh;
at1 = ci2*crealf(ffc[jj])*cimagf(ffc[jj]);
at2 = dkx*at1;
zt1 = -cimagf(cu[2+3*jk])
+ crealf(cu[2+3*jk])*_Complex_I;
zt2 = -cimagf(cu[1+3*jk])
+ crealf(cu[1+3*jk])*_Complex_I;
bxy[3*jk] = zero;
bxy[1+3*jk] = -at2*zt1;
bxy[2+3*jk] = at2*zt2;
bxy[3*k1+3*jk] = zero;
bxy[1+3*k1+3*jk] = zero;
bxy[2+3*k1+3*jk] = zero;
wp += at1*(cu[3*jk]*conjf(cu[3*jk])
+ cu[1+3*jk]*conjf(cu[1+3*jk])
+ cu[2+3*jk]*conjf(cu[2+3*jk]));
}
sum1 += wp;
}
wp = 0.0;
/* mode numbers kx = 0, nx/2 */
if (ks==0) {
for (k = 1; k < nyh; k++) {
k1 = ny - k;
dky = dny*(float) k;
at1 = ci2*crealf(ffc[k])*cimagf(ffc[k]);
at2 = dky*at1;
zt1 = -cimagf(cu[2+3*k]) + crealf(cu[2+3*k])*_Complex_I;
zt2 = -cimagf(cu[3*k]) + crealf(cu[3*k])*_Complex_I;
bxy[3*k] = at2*zt1;
bxy[1+3*k] = zero;
bxy[2+3*k] = -at2*zt2;
bxy[3*k1] = zero;
bxy[1+3*k1] = zero;
bxy[2+3*k1] = zero;
wp += at1*(cu[3*k]*conjf(cu[3*k]) + cu[1+3*k]*conjf(cu[1+3*k])
+ cu[2+3*k]*conjf(cu[2+3*k]));
}
k1 = 3*nyh;
bxy[0] = zero;
bxy[1] = zero;
bxy[2] = zero;
bxy[k1] = zero;
bxy[1+k1] = zero;
bxy[2+k1] = zero;
}
sum1 += wp;
L40:
*wm = sum1*((float) nx)*((float) ny);
return;
}
/*--------------------------------------------------------------------*/
void cppbaddext2(float bxy[], int nyp, float omx, float omy, float omz,
                 int nx, int nxe, int nypmx) {
/* adds a constant external magnetic field to bxy for the 2-1/2d code
   bxy = magnetic field, interleaved (x,y,z) triplets, row stride 3*nxe
   nyp = number of primary (complete) gridpoints in particle partition
   omx/omy/omz = electron cyclotron frequency components added in x/y/z
   nx = system length in x direction
   nxe = first dimension of field array, must be >= nx
   nypmx = maximum size of particle partition, including guard cells
   only the first nx points of each of the nyp rows are updated;
   guard cells are left untouched
local data */
   int ix, iy, koff;
#pragma omp parallel for private(ix,iy,koff)
   for (iy = 0; iy < nyp; iy++) {
/* offset of row iy in the flattened (x,y,z)-interleaved array */
      koff = 3*nxe*iy;
      for (ix = 0; ix < nx; ix++) {
         bxy[koff+3*ix] += omx;
         bxy[koff+3*ix+1] += omy;
         bxy[koff+3*ix+2] += omz;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cmppdcuperp23(float complex dcu[], float complex amu[], int nx,
                   int ny, int kstrt, int nyv, int kxp) {
/* this subroutine calculates transverse part of the derivative of
   the current density from the momentum flux
   in 2-1/2d with periodic boundary conditions.
   input: all, output: dcu
   approximate flop count is: 45*nxc*nyc
   and nxc*nyc divides
   where nxc = (nx/2-1)/nvp, nyc = ny/2 - 1, and nvp = number of procs
   the derivative of the current is calculated using the equations:
   dcu[kx][ky][0] = -sqrt(-1)*(kx*vx*vx+ky*vx*vy)
   dcu[kx][ky][1] = -sqrt(-1)*(kx*vx*vy+ky*vy*vy)
   dcu[kx][ky][2] = -sqrt(-1)*(kx*vx*vz+ky*vy*vz)
   where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
   except for dcu(i,kx=pi) = dcu(i,ky=pi) = dcu(i,kx=0,ky=0) = 0.
   the transverse part is calculated using the equation:
   dcu[kx][ky][0] = dcu[kx][ky][0]-kx*(kx*dcu[kx][ky][0]
   + ky*dcu[kx][ky][1])/(kx*kx+ky*ky)
   dcu[kx][ky][1] = dcu[kx][ky][1]-ky*(kx*dcu[kx][ky][0]
   + ky*dcu[kx][ky][1])/(kx*kx+ky*ky)
   on output:
   dcu[j][k][i] = i-th component of transverse part of complex derivative
   of current for fourier mode (jj,k), where jj = j + kxp*(kstrt - 1)
   amu[j][k][0] = xx component of complex momentum flux
   amu[j][k][1] = xy component of complex momentum flux
   amu[j][k][2] = zx component of complex momentum flux
   amu[j][k][3] = zy component of complex momentum flux
   for fourier mode (jj,k), where jj = j + kxp*(kstrt - 1)
   nx/ny = system length in x/y direction
   kstrt = starting data block number
   nyv = second dimension of field arrays, must be >= ny
   kxp = number of data values per block
local data */
   int nxh, nyh, ks, joff, kxps, j, jk, k, k1;
   float dnx, dny, dkx, dky, dkx2, dky2, dkxy, dkxy2, at1;
   float complex zero, zt1, zt2, zt3;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
/* ks = this processor's zero-based block index */
   ks = kstrt - 1;
/* joff = global kx offset of this block; kxps = number of local kx modes,
   clamped to [0,kxp] */
   joff = kxp*ks;
   kxps = nxh - joff;
   kxps = 0 > kxps ? 0 : kxps;
   kxps = kxp < kxps ? kxp : kxps;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   zero = 0.0 + 0.0*_Complex_I;
/* calculate transverse part of current */
   if (kstrt > nxh)
      return;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for \
private(j,k,k1,jk,dkx,dkx2,dky,dky2,dkxy,dkxy2,at1,zt1,zt2,zt3)
   for (j = 0; j < kxps; j++) {
      dkx = dnx*(float) (j + joff);
      dkx2 = dkx*dkx;
      jk = nyv*j;
      if ((j+joff) > 0) {
         for (k = 1; k < nyh; k++) {
/* k1 = conjugate-symmetric mode index for -ky */
            k1 = ny - k;
            dky = dny*(float) k;
            dky2 = dky*dky;
            dkxy = dkx*dky;
            dkxy2 = dky2 - dkx2;
            at1 = 1.0/(dkx2 + dky2);
/* zt = -sqrt(-1)*amu: (re,im) -> (im,-re) multiplies by -i */
            zt1 = cimagf(amu[4*k+4*jk])
                - crealf(amu[4*k+4*jk])*_Complex_I;
            zt2 = cimagf(amu[1+4*k+4*jk])
                - crealf(amu[1+4*k+4*jk])*_Complex_I;
            zt3 = at1*(dkxy*zt1 + dkxy2*zt2);
            dcu[3*k+3*jk] = dky*zt3;
            dcu[1+3*k+3*jk] = -dkx*zt3;
            zt1 = cimagf(amu[2+4*k+4*jk])
                - crealf(amu[2+4*k+4*jk])*_Complex_I;
            zt2 = cimagf(amu[3+4*k+4*jk])
                - crealf(amu[3+4*k+4*jk])*_Complex_I;
            dcu[2+3*k+3*jk] = dkx*zt1 + dky*zt2;
/* same computation for the -ky (k1) partner mode, with sign flips */
            zt1 = cimagf(amu[4*k1+4*jk])
                - crealf(amu[4*k1+4*jk])*_Complex_I;
            zt2 = cimagf(amu[1+4*k1+4*jk])
                - crealf(amu[1+4*k1+4*jk])*_Complex_I;
            zt3 = at1*(dkxy*zt1 - dkxy2*zt2);
            dcu[3*k1+3*jk] = dky*zt3;
            dcu[1+3*k1+3*jk] = dkx*zt3;
            zt1 = cimagf(amu[2+4*k1+4*jk])
                - crealf(amu[2+4*k1+4*jk])*_Complex_I;
            zt2 = cimagf(amu[3+4*k1+4*jk])
                - crealf(amu[3+4*k1+4*jk])*_Complex_I;
            dcu[2+3*k1+3*jk] = dkx*zt1 - dky*zt2;
         }
/* mode numbers ky = 0, ny/2 */
         k1 = nyh;
         zt2 = cimagf(amu[1+4*jk]) - crealf(amu[1+4*jk])*_Complex_I;
         dcu[3*jk] = zero;
         dcu[1+3*jk] = dkx*zt2;
         zt1 = cimagf(amu[2+4*jk]) - crealf(amu[2+4*jk])*_Complex_I;
         dcu[2+3*jk] = dkx*zt1;
/* ky = ny/2 modes are zeroed per the boundary conditions above */
         dcu[3*k1+3*jk] = zero;
         dcu[1+3*k1+3*jk] = zero;
         dcu[2+3*k1+3*jk] = zero;
      }
   }
/* mode numbers kx = 0, nx/2 (only on the first processor block) */
   if (ks==0) {
      for (k = 1; k < nyh; k++) {
         k1 = ny - k;
         dky = dny*(float) k;
         zt2 = cimagf(amu[1+4*k]) - crealf(amu[1+4*k])*_Complex_I;
         dcu[3*k] = dky*zt2;
         dcu[1+3*k] = zero;
         zt2 = cimagf(amu[3+4*k]) - crealf(amu[3+4*k])*_Complex_I;
         dcu[2+3*k] = dky*zt2;
         dcu[3*k1] = zero;
         dcu[1+3*k1] = zero;
         dcu[2+3*k1] = zero;
      }
/* zero the kx=0,ky=0 and kx=0,ky=ny/2 modes */
      k1 = 3*nyh;
      dcu[0] = zero;
      dcu[1] = zero;
      dcu[2] = zero;
      dcu[k1] = zero;
      dcu[1+k1] = zero;
      dcu[2+k1] = zero;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cmppadcuperp23(float complex dcu[], float complex amu[], int nx,
                    int ny, int kstrt, int nyv, int kxp) {
/* this subroutine calculates transverse part of the derivative of
   the current density from the momentum flux and acceleration density
   in 2-1/2d with periodic boundary conditions.
   input: all, output: dcu
   approximate flop count is: 65*nxc*nyc
   and nxc*nyc divides
   where nxc = (nx/2-1)/nvp, nyc = ny/2 - 1, and nvp = number of procs
   the derivative of the current is calculated using the equations:
   dcu[kx][ky][0] = dcu[kx][ky][0]-sqrt(-1)*(kx*vx*vx+ky*vx*vy)
   dcu[kx][ky][1] = dcu[kx][ky][1]-sqrt(-1)*(kx*vx*vy+ky*vy*vy)
   dcu[kx][ky][2] = dcu[kx][ky][2]-sqrt(-1)*(kx*vx*vz+ky*vy*vz)
   where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
   except for dcu(i,kx=pi) = dcu(i,ky=pi) = dcu(i,kx=0,ky=0) = 0.
   the transverse part is calculated using the equation:
   dcu[kx][ky][0] = dcu[kx][ky][0]-kx*(kx*dcu[kx][ky][0]
   + ky*dcu[kx][ky][1])/(kx*kx+ky*ky)
   dcu[kx][ky][1] = dcu[kx][ky][1]-ky*(kx*dcu[kx][ky][0]
   + ky*dcu[kx][ky][1])/(kx*kx+ky*ky)
   on input:
   dcu[j][k][i] = complex acceleration density for fourier mode (jj,k)
   on output:
   dcu[j][k][i] = i-th component of transverse part of complex derivative
   of current for fourier mode (jj,k), where jj = j + kxp*(kstrt - 1)
   amu[j][k][0] = xx component of complex momentum flux
   amu[j][k][1] = xy component of complex momentum flux
   amu[j][k][2] = zx component of complex momentum flux
   amu[j][k][3] = zy component of complex momentum flux
   for fourier mode (jj,k), where jj = j + kxp*(kstrt - 1)
   nx/ny = system length in x/y direction
   kstrt = starting data block number
   nyv = second dimension of field arrays, must be >= ny
   kxp = number of data values per block
local data */
   int nxh, nyh, ks, joff, kxps, j, jk, k, k1;
   float dnx, dny, dkx, dky, dkx2, dky2, dkxy, dkxy2, at1;
   float complex zero, zt1, zt2, zt3;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
/* ks = this processor's zero-based block index */
   ks = kstrt - 1;
/* joff = global kx offset of this block; kxps = number of local kx modes */
   joff = kxp*ks;
   kxps = nxh - joff;
   kxps = 0 > kxps ? 0 : kxps;
   kxps = kxp < kxps ? kxp : kxps;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   zero = 0.0 + 0.0*_Complex_I;
/* calculate transverse part of current */
   if (kstrt > nxh)
      return;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for \
private(j,k,k1,jk,dkx,dkx2,dky,dky2,dkxy,dkxy2,at1,zt1,zt2,zt3)
   for (j = 0; j < kxps; j++) {
      dkx = dnx*(float) (j + joff);
      dkx2 = dkx*dkx;
      jk = nyv*j;
      if ((j+joff) > 0) {
         for (k = 1; k < nyh; k++) {
            k1 = ny - k;
            dky = dny*(float) k;
            dky2 = dky*dky;
            dkxy = dkx*dky;
            dkxy2 = dky2 - dkx2;
            at1 = 1.0/(dkx2 + dky2);
/* zt = -sqrt(-1)*amu: (re,im) -> (im,-re) multiplies by -i */
            zt1 = cimagf(amu[4*k+4*jk])
                - crealf(amu[4*k+4*jk])*_Complex_I;
            zt2 = cimagf(amu[1+4*k+4*jk])
                - crealf(amu[1+4*k+4*jk])*_Complex_I;
/* combine acceleration density with flux-derivative terms */
            zt3 = at1*(dky*dcu[3*k+3*jk] - dkx*dcu[1+3*k+3*jk]
                + dkxy*zt1 + dkxy2*zt2);
            dcu[3*k+3*jk] = dky*zt3;
            dcu[1+3*k+3*jk] = -dkx*zt3;
            zt1 = cimagf(amu[2+4*k+4*jk])
                - crealf(amu[2+4*k+4*jk])*_Complex_I;
            zt2 = cimagf(amu[3+4*k+4*jk])
                - crealf(amu[3+4*k+4*jk])*_Complex_I;
            dcu[2+3*k+3*jk] += dkx*zt1 + dky*zt2;
/* same computation for the -ky (k1) partner mode, with sign flips */
            zt1 = cimagf(amu[4*k1+4*jk])
                - crealf(amu[4*k1+4*jk])*_Complex_I;
            zt2 = cimagf(amu[1+4*k1+4*jk])
                - crealf(amu[1+4*k1+4*jk])*_Complex_I;
            zt3 = at1*(dky*dcu[3*k1+3*jk] + dkx*dcu[1+3*k1+3*jk]
                + dkxy*zt1 - dkxy2*zt2);
            dcu[3*k1+3*jk] = dky*zt3;
            dcu[1+3*k1+3*jk] = dkx*zt3;
            zt1 = cimagf(amu[2+4*k1+4*jk])
                - crealf(amu[2+4*k1+4*jk])*_Complex_I;
            zt2 = cimagf(amu[3+4*k1+4*jk])
                - crealf(amu[3+4*k1+4*jk])*_Complex_I;
            dcu[2+3*k1+3*jk] += dkx*zt1 - dky*zt2;
         }
/* mode numbers ky = 0, ny/2 */
         k1 = nyh;
         zt2 = cimagf(amu[1+4*jk]) - crealf(amu[1+4*jk])*_Complex_I;
         dcu[3*jk] = zero;
         dcu[1+3*jk] += dkx*zt2;
         zt1 = cimagf(amu[2+4*jk]) - crealf(amu[2+4*jk])*_Complex_I;
         dcu[2+3*jk] += dkx*zt1;
         dcu[3*k1+3*jk] = zero;
         dcu[1+3*k1+3*jk] = zero;
         dcu[2+3*k1+3*jk] = zero;
      }
   }
/* mode numbers kx = 0, nx/2 (only on the first processor block) */
   if (ks==0) {
      for (k = 1; k < nyh; k++) {
         k1 = ny - k;
         dky = dny*(float) k;
         zt2 = cimagf(amu[1+4*k]) - crealf(amu[1+4*k])*_Complex_I;
         dcu[3*k] += dky*zt2;
         dcu[1+3*k] = zero;
         zt2 = cimagf(amu[3+4*k]) - crealf(amu[3+4*k])*_Complex_I;
         dcu[2+3*k] += dky*zt2;
         dcu[3*k1] = zero;
         dcu[1+3*k1] = zero;
         dcu[2+3*k1] = zero;
      }
/* zero the kx=0,ky=0 and kx=0,ky=ny/2 modes; the z component dcu[2]
   was previously left unzeroed here, violating the documented condition
   dcu(i,kx=0,ky=0) = 0 and differing from cmppdcuperp23 */
      k1 = 3*nyh;
      dcu[0] = zero;
      dcu[1] = zero;
      dcu[2] = zero;
      dcu[k1] = zero;
      dcu[1+k1] = zero;
      dcu[2+k1] = zero;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cmppepoisp23(float complex dcu[], float complex exy[], int isign,
                  float complex ffe[], float ax, float ay, float affp,
                  float wp0, float ci, float *wf, int nx, int ny,
                  int kstrt, int nyv, int kxp, int nyhd) {
/* this subroutine solves 2-1/2d poisson's equation in fourier space for
   transverse electric field (or convolution of transverse electric field
   over particle shape), with periodic boundary conditions.
   using algorithm described in J. Busnardo-Neto, P. L. Pritchett,
   A. T. Lin, and J. M. Dawson, J. Computational Phys. 23, 300 (1977).
   for isign = 0, input: isign,ax,ay,affp,wp0,nx,ny,kstrt,nyv,kxp,nyhd,
   output: ffe
   for isign /= 0, input: dcu,ffe,isign,affp,ci,nx,ny,kstrt,nyv,kxp,nyhd,
   output: exy,wf
   approximate flop count is: 59*nxc*nyc + 32*(nxc + nyc)
   where nxc = (nx/2-1)/nvp, nyc = ny/2 - 1, and nvp = number of procs
   if isign = 0, form factor array is prepared
   if isign = -1, smoothed transverse electric field is calculated
   using the equations:
   ex[ky][kx] = -ci*ci*g[ky][kx]*dcux[ky][kx]*s[ky][kx]
   ey[ky][kx] = -ci*ci*g[ky][kx]*dcuy[ky][kx]*s[ky][kx]
   ez[ky][kx] = -ci*ci*g[ky][kx]*dcuz[ky][kx]*s[ky][kx]
   where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
   g[ky][kx] = (affp/(kx**2+ky**2))*s[ky][kx],
   s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for
   ex(kx=pi) = ey(kx=pi) = ez(kx=pi) = 0,
   ex(ky=pi) = ey(ky=pi) = ez(ky=pi) = 0,
   ex(kx=0,ky=0) = ey(kx=0,ky=0) = ez(kx=0,ky=0) = 0.
   if isign = 1, unsmoothed transverse electric field is calculated
   using the equations:
   ex[ky][kx] = -ci*ci*g[ky][kx]*dcux[ky][kx]
   ey[ky][kx] = -ci*ci*g[ky][kx]*dcuy[ky][kx]
   ez[ky][kx] = -ci*ci*g[ky][kx]*dcuz[ky][kx]
   dcu[j][k][i] = i-th component of transverse part of complex derivative
   of current,
   exy[j][k][i] = i-th component of complex transverse electric field,
   for fourier mode (jj,k), where jj = j + kxp*(kstrt - 1)
   kxp = number of data values per block
   kstrt = starting data block number
   imag(ffe[j][k]) = finite-size particle shape factor s
   real(ffe[j][k]) = potential green's function g
   for fourier mode (jj,k), where jj = j + kxp*(kstrt - 1)
   ax/ay = half-width of particle in x/y direction
   affp = normalization constant = nx*ny/np, where np=number of particles
   wp0 = normalized total plasma frequency squared
   ci = reciprocal of velocity of light
   transverse electric field energy is also calculated, using
   wf = nx*ny*sum((affp/((kx**2+ky**2)*ci*ci)**2)
   |dcu[ky][kx]*s[ky][kx]|**2)
   this expression is valid only if the derivative of current is
   divergence-free
   nx/ny = system length in x/y direction
   nyv = second dimension of field arrays, must be >= ny
   nyhd = first dimension of form factor array, must be >= nyh
local data */
   int nxh, nyh, ks, joff, kxps, j, jj, jk, k, k1;
   float dnx, dny, ci2, wpc, dkx, dky, at1, at2, at3, at4;
   float complex zero;
   double wp, sum1;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
/* ks = this processor's zero-based block index */
   ks = kstrt - 1;
/* joff = global kx offset of this block; kxps = number of local kx modes */
   joff = kxp*ks;
   kxps = nxh - joff;
   kxps = 0 > kxps ? 0 : kxps;
   kxps = kxp < kxps ? kxp : kxps;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   zero = 0.0 + 0.0*_Complex_I;
   ci2 = ci*ci;
   if (isign != 0)
      goto L30;
   if (kstrt > nxh) return;
/* wpc shifts the green's function denominator by the plasma response */
   wpc = wp0*ci2;
/* prepare form factor array */
   for (j = 0; j < kxps; j++) {
      dkx = dnx*(float) (j + joff);
      jj = nyhd*j;
      at1 = dkx*dkx;
      at2 = pow((dkx*ax),2);
      for (k = 0; k < nyh; k++) {
         dky = dny*(float) k;
         at3 = dky*dky + at1;
         at4 = exp(-.5*(pow((dky*ay),2) + at2));
         if (at3==0.0) {
/* kx=0,ky=0: store affp for g and 1 for s as placeholders */
            ffe[k+jj] = affp + 1.0*_Complex_I;
         }
         else {
            ffe[k+jj] = (affp*at4/(at3 + wpc*at4*at4)) + at4*_Complex_I;
         }
      }
   }
   return;
/* calculate smoothed transverse electric field and sum field energy */
 L30: if (isign > 0)
      goto L80;
   sum1 = 0.0;
   if (kstrt > nxh)
      goto L70;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for private(j,k,k1,jj,jk,at1,at2,wp) \
reduction(+:sum1)
   for (j = 0; j < kxps; j++) {
      jj = nyhd*j;
      jk = nyv*j;
      wp = 0.0;
      if ((j+joff) > 0) {
         for (k = 1; k < nyh; k++) {
            k1 = ny - k;
/* at1 = -ci2*g*s (smoothing included); at2 = (ci2*g)**2 for energy */
            at2 = -ci2*crealf(ffe[k+jj]);
            at1 = at2*cimagf(ffe[k+jj]);
            at2 = at2*at2;
            exy[3*k+3*jk] = at1*dcu[3*k+3*jk];
            exy[1+3*k+3*jk] = at1*dcu[1+3*k+3*jk];
            exy[2+3*k+3*jk] = at1*dcu[2+3*k+3*jk];
            exy[3*k1+3*jk] = at1*dcu[3*k1+3*jk];
            exy[1+3*k1+3*jk] = at1*dcu[1+3*k1+3*jk];
            exy[2+3*k1+3*jk] = at1*dcu[2+3*k1+3*jk];
            wp += at2*(dcu[3*k+3*jk]*conjf(dcu[3*k+3*jk])
               + dcu[1+3*k+3*jk]*conjf(dcu[1+3*k+3*jk])
               + dcu[2+3*k+3*jk]*conjf(dcu[2+3*k+3*jk])
               + dcu[3*k1+3*jk]*conjf(dcu[3*k1+3*jk])
               + dcu[1+3*k1+3*jk]*conjf(dcu[1+3*k1+3*jk])
               + dcu[2+3*k1+3*jk]*conjf(dcu[2+3*k1+3*jk]));
         }
/* mode numbers ky = 0, ny/2 */
         k1 = nyh;
         at2 = -ci2*crealf(ffe[jj]);
         at1 = at2*cimagf(ffe[jj]);
         at2 = at2*at2;
         exy[3*jk] = at1*dcu[3*jk];
         exy[1+3*jk] = at1*dcu[1+3*jk];
         exy[2+3*jk] = at1*dcu[2+3*jk];
         exy[3*k1+3*jk] = zero;
         exy[1+3*k1+3*jk] = zero;
         exy[2+3*k1+3*jk] = zero;
         wp += at2*(dcu[3*jk]*conjf(dcu[3*jk])
            + dcu[1+3*jk]*conjf(dcu[1+3*jk])
            + dcu[2+3*jk]*conjf(dcu[2+3*jk]));
      }
      sum1 += wp;
   }
   wp = 0.0;
/* mode numbers kx = 0, nx/2 (only on the first processor block) */
   if (ks==0) {
      for (k = 1; k < nyh; k++) {
         k1 = ny - k;
         at2 = -ci2*crealf(ffe[k]);
         at1 = at2*cimagf(ffe[k]);
         at2 = at2*at2;
         exy[3*k] = at1*dcu[3*k];
         exy[1+3*k] = at1*dcu[1+3*k];
         exy[2+3*k] = at1*dcu[2+3*k];
         exy[3*k1] = zero;
         exy[1+3*k1] = zero;
         exy[2+3*k1] = zero;
         wp += at2*(dcu[3*k]*conjf(dcu[3*k])
            + dcu[1+3*k]*conjf(dcu[1+3*k])
            + dcu[2+3*k]*conjf(dcu[2+3*k]));
      }
/* zero the kx=0,ky=0 and kx=0,ky=ny/2 modes */
      k1 = 3*nyh;
      exy[0] = zero;
      exy[1] = zero;
      exy[2] = zero;
      exy[k1] = zero;
      exy[1+k1] = zero;
      exy[2+k1] = zero;
   }
   sum1 += wp;
 L70:
   *wf = sum1*((float) nx)*((float) ny)/affp;
   return;
/* calculate unsmoothed transverse electric field and sum field energy */
 L80: sum1 = 0.0;
   if (kstrt > nxh)
      goto L120;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for private(j,k,k1,jj,jk,at1,at2,wp) \
reduction(+:sum1)
   for (j = 0; j < kxps; j++) {
      jj = nyhd*j;
      jk = nyv*j;
      wp = 0.0;
      if ((j+joff) > 0) {
         for (k = 1; k < nyh; k++) {
            k1 = ny - k;
/* at2 = -ci2*g (no smoothing factor here); at1 = at2**2 for energy */
            at2 = -ci2*crealf(ffe[k+jj]);
            at1 = at2*at2;
            exy[3*k+3*jk] = at2*dcu[3*k+3*jk];
            exy[1+3*k+3*jk] = at2*dcu[1+3*k+3*jk];
            exy[2+3*k+3*jk] = at2*dcu[2+3*k+3*jk];
            exy[3*k1+3*jk] = at2*dcu[3*k1+3*jk];
            exy[1+3*k1+3*jk] = at2*dcu[1+3*k1+3*jk];
            exy[2+3*k1+3*jk] = at2*dcu[2+3*k1+3*jk];
            wp += at1*(dcu[3*k+3*jk]*conjf(dcu[3*k+3*jk])
               + dcu[1+3*k+3*jk]*conjf(dcu[1+3*k+3*jk])
               + dcu[2+3*k+3*jk]*conjf(dcu[2+3*k+3*jk])
               + dcu[3*k1+3*jk]*conjf(dcu[3*k1+3*jk])
               + dcu[1+3*k1+3*jk]*conjf(dcu[1+3*k1+3*jk])
               + dcu[2+3*k1+3*jk]*conjf(dcu[2+3*k1+3*jk]));
         }
/* mode numbers ky = 0, ny/2 */
         k1 = nyh;
         at2 = -ci2*crealf(ffe[jj]);
         at1 = at2*at2;
         exy[3*jk] = at2*dcu[3*jk];
         exy[1+3*jk] = at2*dcu[1+3*jk];
         exy[2+3*jk] = at2*dcu[2+3*jk];
         exy[3*k1+3*jk] = zero;
         exy[1+3*k1+3*jk] = zero;
         exy[2+3*k1+3*jk] = zero;
         wp += at1*(dcu[3*jk]*conjf(dcu[3*jk])
            + dcu[1+3*jk]*conjf(dcu[1+3*jk])
            + dcu[2+3*jk]*conjf(dcu[2+3*jk]));
      }
      sum1 += wp;
   }
   wp = 0.0;
/* mode numbers kx = 0, nx/2 (only on the first processor block) */
   if (ks==0) {
      for (k = 1; k < nyh; k++) {
         k1 = ny - k;
         at2 = -ci2*crealf(ffe[k]);
         at1 = at2*at2;
         exy[3*k] = at2*dcu[3*k];
         exy[1+3*k] = at2*dcu[1+3*k];
         exy[2+3*k] = at2*dcu[2+3*k];
         exy[3*k1] = zero;
         exy[1+3*k1] = zero;
         exy[2+3*k1] = zero;
         wp += at1*(dcu[3*k]*conjf(dcu[3*k])
            + dcu[1+3*k]*conjf(dcu[1+3*k])
            + dcu[2+3*k]*conjf(dcu[2+3*k]));
      }
/* zero the kx=0,ky=0 and kx=0,ky=ny/2 modes */
      k1 = 3*nyh;
      exy[0] = zero;
      exy[1] = zero;
      exy[2] = zero;
      exy[k1] = zero;
      exy[1+k1] = zero;
      exy[2+k1] = zero;
   }
   sum1 += wp;
 L120:
   *wf = sum1*((float) nx)*((float) ny)/affp;
   return;
}
/*--------------------------------------------------------------------*/
void cppaddvrfield2(float a[], float b[], float c[], int ndim, int nxe,
                    int nypmx) {
/* calculates a = b + c for a distributed real vector field
   a/b/c = field arrays with ndim interleaved components, row
   stride ndim*nxe, nypmx rows; every element of each row is summed,
   so the two inner (component, gridpoint) loops are fused into one
   contiguous pass of length ndim*nxe
local data */
   int n, row, rlen, off;
   rlen = ndim*nxe;
#pragma omp parallel for private(n,row,off)
   for (row = 0; row < nypmx; row++) {
      off = rlen*row;
      for (n = 0; n < rlen; n++) {
         a[off+n] = b[off+n] + c[off+n];
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cwpfft2rinit(int mixup[], float complex sct[], int indx, int indy,
int nxhyd, int nxyhd) {
/* this subroutine calculates tables needed by a two dimensional
real to complex fast fourier transform and its inverse.
input: indx, indy, nxhyd, nxyhd
output: mixup, sct
mixup = array of bit reversed addresses
sct = sine/cosine table
indx/indy = exponent which determines length in x/y direction,
where nx=2**indx, ny=2**indy
nxhyd = maximum of (nx/2,ny)
nxyhd = one half of maximum of (nx,ny)
written by viktor k. decyk, ucla
local data */
int indx1, indx1y, nx, ny, nxy, nxhy, nxyh;
int j, k, lb, ll, jb, it;
float dnxy, arg;
indx1 = indx - 1;
indx1y = indx1 > indy ? indx1 : indy;
nx = 1L<<indx;
ny = 1L<<indy;
nxy = nx > ny ? nx : ny;
nxhy = 1L<<indx1y;
/* bit-reverse index table: mixup[j] = 1 + reversed bits of j */
for (j = 0; j < nxhy; j++) {
lb = j;
ll = 0;
for (k = 0; k < indx1y; k++) {
jb = lb/2;
it = lb - 2*jb;
lb = jb;
ll = 2*ll + it;
}
mixup[j] = ll + 1;
}
/* sine/cosine table for the angles 2*n*pi/nxy */
nxyh = nxy/2;
dnxy = 6.28318530717959/(float) nxy;
for (j = 0; j < nxyh; j++) {
arg = dnxy*(float) j;
sct[j] = cosf(arg) - sinf(arg)*_Complex_I;
}
return;
}
/*--------------------------------------------------------------------*/
void cppfft2rmxx(float complex f[], int isign, int mixup[],
                 float complex sct[], int indx, int indy, int kstrt,
                 int kypi, int kypp, int nxvh, int kypd, int nxhyd,
                 int nxyhd) {
/* this subroutine performs the x part of a two dimensional real to
   complex fast fourier transform and its inverse, for a subset of y,
   using complex arithmetic, with OpenMP,
   for data which is distributed in blocks
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 10)/nvp
   for isign = 1, approximate flop count: N*(5*log2(N) + 8)/nvp
   where N = (nx/2)*ny, and nvp = number of procs
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, an inverse fourier transform is performed
   f[m][n] = (1/nx*ny)*sum(f[k][j]*exp(-sqrt(-1)*2pi*n*j/nx)
   if isign = 1, a forward fourier transform is performed
   f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*n*j/nx)
   kstrt = starting data block number
   kypi = initial y index used
   kypp = number of y indices used
   nxvh = first dimension of f
   kypd = second dimension of f
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nxhyd = maximum of (nx/2,ny)
   nxyhd = one half of maximum of (nx,ny)
   the real data is stored in a complex array of length nx/2, ny
   with the odd/even x points stored in the real/imaginary parts.
   in complex notation, fourier coefficients are stored as follows:
   f[k][j] = mode j,kk, where kk = k + kyp*(kstrt - 1)
   0 <= j < nx/2 and 0 <= kk < ny, except for
   f[k][0] = mode nx/2,kk, where ny/2+1 <= kk < ny, and
   imaginary part of f[0][0] = real part of mode nx/2,0 on mode kstrt=0
   imaginary part of f[0][0] = real part of mode nx/2,ny/2
   on mode kstrt=(ny/2)/kyp
   written by viktor k. decyk, ucla
   parallel, RISC optimized version
local data */
   int indx1, indx1y, nx, nxh, nxhh, ny;
   int nxy, nxhy, kypt, j, k, nrx;
   int i, m, ns, ns2, km, kmr, k1, k2, j1, j2, nrxb, joff;
   float ani;
   float complex s, t, t1;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   nxh = nx/2;
   nxhh = nx/4;
   ny = 1L<<indy;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
/* kypt = last (inclusive, 1-based) y index processed by this call */
   kypt = kypi + kypp - 1;
   if (kstrt > ny)
      return;
   if (isign > 0)
      goto L70;
/* inverse fourier transform */
/* ani includes the extra factor 1/2 from the real-to-complex unpacking */
   ani = 0.5/(((float) nx)*((float) ny));
/* nrxb/nrx = strides into the shared mixup/sct tables for length nxh */
   nrxb = nxhy/nxh;
   nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,joff,s,t,t1)
   for (i = kypi-1; i < kypt; i++) {
      joff = nxvh*i;
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t = f[j1+joff];
            f[j1+joff] = f[j+joff];
            f[j+joff] = t;
         }
      }
/* then transform in x: log2(nxh) butterfly stages of width ns */
      ns = 1;
      for (m = 0; m < indx1; m++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
               s = sct[kmr*j];
               t = s*f[j2+joff];
               f[j2+joff] = f[j1+joff] - t;
               f[j1+joff] += t;
            }
         }
         ns = ns2;
      }
/* unscramble coefficients and normalize
   (recover the real-input spectrum from the half-length complex FFT) */
      kmr = nxy/nx;
      for (j = 1; j < nxhh; j++) {
         t1 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
         t = conjf(f[nxh-j+joff]);
         s = f[j+joff] + t;
         t = (f[j+joff] - t)*t1;
         f[j+joff] = ani*(s + t);
         f[nxh-j+joff] = ani*conjf(s - t);
      }
/* modes 0 and nx/2 are real; pack mode nx/2 into the imaginary part */
      f[joff] = 2.0*ani*((crealf(f[joff]) + cimagf(f[joff]))
                + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I);
      if (nxhh > 0)
         f[nxhh+joff] = 2.0*ani*conjf(f[nxhh+joff]);
   }
   return;
/* forward fourier transform */
 L70: nrxb = nxhy/nxh;
   nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,joff,s,t,t1)
   for (i = kypi-1; i < kypt; i++) {
      joff = nxvh*i;
/* scramble coefficients (inverse of the unscrambling above) */
      kmr = nxy/nx;
      for (j = 1; j < nxhh; j++) {
         t1 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
         t = conjf(f[nxh-j+joff]);
         s = f[j+joff] + t;
         t = (f[j+joff] - t)*t1;
         f[j+joff] = s + t;
         f[nxh-j+joff] = conjf(s - t);
      }
      f[joff] = (crealf(f[joff]) + cimagf(f[joff]))
              + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I;
      if (nxhh > 0)
         f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]);
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t = f[j1+joff];
            f[j1+joff] = f[j+joff];
            f[j+joff] = t;
         }
      }
/* then transform in x (conjugated twiddles give the forward transform) */
      ns = 1;
      for (m = 0; m < indx1; m++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
               s = conjf(sct[kmr*j]);
               t = s*f[j2+joff];
               f[j2+joff] = f[j1+joff] - t;
               f[j1+joff] += t;
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppfft2rmxy(float complex g[], int isign, int mixup[],
                 float complex sct[], int indx, int indy, int kstrt,
                 int kxpi, int kxpp, int nyv, int kxp, int nxhyd,
                 int nxyhd) {
/* this subroutine performs the y part of a two dimensional real to
   complex fast fourier transform and its inverse, for a subset of x,
   using complex arithmetic, with OpenMP,
   for data which is distributed in blocks
   for isign = (-1,1), input: all, output: g
   for isign = -1, approximate flop count: N*(5*log2(N) + 10)/nvp
   for isign = 1, approximate flop count: N*(5*log2(N) + 8)/nvp
   where N = (nx/2)*ny, and nvp = number of procs
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, an inverse fourier transform is performed
   g[m][n] = sum(g[k][j]*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, a forward fourier transform is performed
   g[k][j] = sum(g[m][n]*exp(sqrt(-1)*2pi*m*k/ny))
   kstrt = starting data block number
   kxpi = initial x index used
   kxpp = number of x indices used
   nyv = first dimension of g
   kxp = number of data values per block in x
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nxhyd = maximum of (nx/2,ny)
   nxyhd = one half of maximum of (nx,ny)
   the real data is stored in a complex array of length nx/2, ny
   with the odd/even x points stored in the real/imaginary parts.
   in complex notation, fourier coefficients are stored as follows:
   g[k][j] = mode jj,k, where jj = j + kxp*(kstrt - 1)
   0 <= jj < nx/2 and 0 <= k < ny, except for
   g[0][k] = mode nx/2,k, where ny/2+1 <= k < ny, and
   imaginary part of g[0][0] = real part of mode nx/2,0 and
   imaginary part of g[1][ny/2] = real part of mode nx/2,ny/2
   on node kstrt=0
   written by viktor k. decyk, ucla
   parallel, RISC optimized version
local data */
   int indx1, indx1y, nx, nxh, ny, nyh;
   int nxy, nxhy, ks, kxpt, j, k, nry;
   int i, m, ns, ns2, km, kmr, k1, k2, j1, j2, nryb, koff;
   float complex s, t;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   nxh = nx/2;
   ny = 1L<<indy;
   nyh = ny/2;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
/* ks = this processor's zero-based block index */
   ks = kstrt - 1;
/* kxpt = last (inclusive, 1-based) x index processed by this call */
   kxpt = kxpi + kxpp - 1;
   if (kstrt > nxh)
      return;
   if (isign > 0)
      goto L70;
/* inverse fourier transform */
/* nryb/nry = strides into the shared mixup/sct tables for length ny */
   nryb = nxhy/ny;
   nry = nxy/ny;
#pragma omp parallel for \
private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,koff,s,t)
   for (i = kxpi-1; i < kxpt; i++) {
      koff = nyv*i;
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            t = g[k1+koff];
            g[k1+koff] = g[k+koff];
            g[k+koff] = t;
         }
      }
/* then transform in y: log2(ny) butterfly stages of width ns */
      ns = 1;
      for (m = 0; m < indy; m++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
               s = sct[kmr*j];
               t = s*g[j2+koff];
               g[j2+koff] = g[j1+koff] - t;
               g[j1+koff] += t;
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 (both packed in column 0 on node 0) */
   if ((ks==0) && (kxpi==1)) {
      for (k = 1; k < nyh; k++) {
         s = g[ny-k];
         g[ny-k] = 0.5*(cimagf(g[k] + s) + crealf(g[k] - s)*_Complex_I);
         g[k] = 0.5*(crealf(g[k] + s) + cimagf(g[k] - s)*_Complex_I);
      }
   }
   return;
/* forward fourier transform */
 L70: nryb = nxhy/ny;
   nry = nxy/ny;
/* scramble modes kx = 0, nx/2 (inverse of the unscrambling above) */
   if ((ks==0) && (kxpi==1)) {
      for (k = 1; k < nyh; k++) {
         s = cimagf(g[ny-k]) + crealf(g[ny-k])*_Complex_I;
         g[ny-k] = conjf(g[k] - s);
         g[k] += s;
      }
   }
#pragma omp parallel for \
private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,koff,s,t)
   for (i = kxpi-1; i < kxpt; i++) {
      koff = nyv*i;
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            t = g[k1+koff];
            g[k1+koff] = g[k+koff];
            g[k+koff] = t;
         }
      }
/* then transform in y (conjugated twiddles give the forward transform) */
      ns = 1;
      for (m = 0; m < indy; m++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
               s = conjf(sct[kmr*j]);
               t = s*g[j2+koff];
               g[j2+koff] = g[j1+koff] - t;
               g[j1+koff] += t;
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppfft2rm3xx(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int kstrt,
int kypi, int kypp, int nxvh, int kypd, int nxhyd,
int nxyhd) {
/* this subroutine performs the x part of 3 two dimensional real to
complex fast fourier transforms and their inverses, for a subset of y,
using complex arithmetic, with OpenMP,
for data which is distributed in blocks
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 10)/nvp
for isign = 1, approximate flop count: N*(5*log2(N) + 8)/nvp
where N = (nx/2)*ny, and nvp = number of procs
indx/indy = exponent which determines length in x/y direction,
where nx=2**indx, ny=2**indy
if isign = -1, an inverse fourier transform is performed
f[m][n][0:2] = (1/nx*ny)*sum(f[k][j][0:2]*exp(-sqrt(-1)*2pi*n*j/nx)
if isign = 1, a forward fourier transform is performed
f[k][j][0:2] = sum(f[m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)*
kstrt = starting data block number
kypi = initial y index used
kypp = number of y indices used
nxvh = first dimension of f
kypd = second dimension of f
mixup = array of bit reversed addresses
sct = sine/cosine table
nxhyd = maximum of (nx/2,ny)
nxyhd = one half of maximum of (nx,ny)
the real data is stored in a complex array of length nx/2, ny
with the odd/even x points stored in the real/imaginary parts.
in complex notation, fourier coefficients are stored as follows:
f[k][j][0:2] = mode j,kk, where kk = k + kyp*(kstrt - 1)
0 <= j < nx/2 and 0 <= kk < ny, except for
f[k][0][0:2] = mode nx/2,kk, where ny/2+1 <= kk < ny, and
imaginary part of f[0][0][0:2] = real part of mode nx/2,0
on mode kstrt=0
imaginary part of f[0][0][0:2] = real part of mode nx/2,ny/2
on mode kstrt=(ny/2)/kyp
written by viktor k. decyk, ucla
parallel, RISC optimized version
local data */
int indx1, indx1y, nx, nxh, nxhh, ny;
int nxy, nxhy, kypt, j, k, nrx;
int i, m, ns, ns2, km, kmr, k1, k2, j1, j2, nrxb, joff;
float ani, at1, at2;
float complex s, t, t1, t2, t3;
indx1 = indx - 1;
indx1y = indx1 > indy ? indx1 : indy;
nx = 1L<<indx;
nxh = nx/2;
nxhh = nx/4;
ny = 1L<<indy;
nxy = nx > ny ? nx : ny;
nxhy = 1L<<indx1y;
kypt = kypi + kypp - 1;
if (kstrt > ny)
return;
if (isign > 0)
goto L100;
/* inverse fourier transform */
ani = 0.5/(((float) nx)*((float) ny));
nrxb = nxhy/nxh;
nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,joff,at1,at2,s,t,t1,t2,t3)
for (i = kypi-1; i < kypt; i++) {
joff = 3*nxvh*i;
/* swap complex components */
for (j = 0; j < nxh; j++) {
at1 = crealf(f[2+3*j+joff]);
f[2+3*j+joff] = crealf(f[1+3*j+joff])
+ cimagf(f[2+3*j+joff])*_Complex_I;
at2 = cimagf(f[1+3*j+joff]);
f[1+3*j+joff] = cimagf(f[3*j+joff]) + at1*_Complex_I;
f[3*j+joff] = crealf(f[3*j+joff]) + at2*_Complex_I;
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
t1 = f[3*j1+joff];
t2 = f[1+3*j1+joff];
t3 = f[2+3*j1+joff];
f[3*j1+joff] = f[3*j+joff];
f[1+3*j1+joff] = f[1+3*j+joff];
f[2+3*j1+joff] = f[2+3*j+joff];
f[3*j+joff] = t1;
f[1+3*j+joff] = t2;
f[2+3*j+joff] = t3;
}
}
/* then transform in x */
ns = 1;
for (m = 0; m < indx1; m++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = j + k1;
j2 = j + k2;
s = sct[kmr*j];
t1 = s*f[3*j2+joff];
t2 = s*f[1+3*j2+joff];
t3 = s*f[2+3*j2+joff];
f[3*j2+joff] = f[3*j1+joff] - t1;
f[1+3*j2+joff] = f[1+3*j1+joff] - t2;
f[2+3*j2+joff] = f[2+3*j1+joff] - t3;
f[3*j1+joff] += t1;
f[1+3*j1+joff] += t2;
f[2+3*j1+joff] += t3;
}
}
ns = ns2;
}
/* unscramble coefficients and normalize */
kmr = nxy/nx;
for (j = 1; j < nxhh; j++) {
t1 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
for (k = 0; k < 3; k++) {
t = conjf(f[k+3*(nxh-j)+joff]);
s = f[k+3*j+joff] + t;
t = (f[k+3*j+joff] - t)*t1;
f[k+3*j+joff] = ani*(s + t);
f[k+3*(nxh-j)+joff] = ani*conjf(s - t);
}
}
for (k = 0; k < 3; k++) {
f[k+joff] = 2.0*ani*((crealf(f[k+joff]) + cimagf(f[k+joff]))
+ (crealf(f[k+joff]) - cimagf(f[k+joff]))*_Complex_I);
if (nxhh > 0)
f[k+3*nxhh+joff] = 2.0*ani*conjf(f[k+3*nxhh+joff]);
}
}
return;
/* forward fourier transform */
L100: nrxb = nxhy/nxh;
nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,joff,at1,at2,s,t,t1,t2,t3)
for (i = kypi-1; i < kypt; i++) {
joff = 3*nxvh*i;
/* scramble coefficients */
kmr = nxy/nx;
for (j = 1; j < nxhh; j++) {
t1 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
for (k = 0; k < 3; k++) {
t = conjf(f[k+3*(nxh-j)+joff]);
s = f[k+3*j+joff] + t;
t = (f[k+3*j+joff] - t)*t1;
f[k+3*j+joff] = s + t;
f[k+3*(nxh-j)+joff] = conjf(s - t);
}
}
for (k = 0; k < 3; k++) {
f[k+joff] = (crealf(f[k+joff]) + cimagf(f[k+joff]))
+ (crealf(f[k+joff]) - cimagf(f[k+joff]))*_Complex_I;
if (nxhh > 0)
f[k+3*nxhh+joff] = 2.0*conjf(f[k+3*nxhh+joff]);
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
t1 = f[3*j1+joff];
t2 = f[1+3*j1+joff];
t3 = f[2+3*j1+joff];
f[3*j1+joff] = f[3*j+joff];
f[1+3*j1+joff] = f[1+3*j+joff];
f[2+3*j1+joff] = f[2+3*j+joff];
f[3*j+joff] = t1;
f[1+3*j+joff] = t2;
f[2+3*j+joff] = t3;
}
}
/* then transform in x */
ns = 1;
for (m = 0; m < indx1; m++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = j + k1;
j2 = j + k2;
s = conjf(sct[kmr*j]);
t1 = s*f[3*j2+joff];
t2 = s*f[1+3*j2+joff];
t3 = s*f[2+3*j2+joff];
f[3*j2+joff] = f[3*j1+joff] - t1;
f[1+3*j2+joff] = f[1+3*j1+joff] - t2;
f[2+3*j2+joff] = f[2+3*j1+joff] - t3;
f[3*j1+joff] += t1;
f[1+3*j1+joff] += t2;
f[2+3*j1+joff] += t3;
}
}
ns = ns2;
}
/* swap complex components */
for (j = 0; j < nxh; j++) {
at1 = crealf(f[2+3*j+joff]);
f[2+3*j+joff] = cimagf(f[1+3*j+joff])
+ cimagf(f[2+3*j+joff])*_Complex_I;
at2 = crealf(f[1+3*j+joff]);
f[1+3*j+joff] = at1 + cimagf(f[3*j+joff])*_Complex_I;
f[3*j+joff] = crealf(f[3*j+joff]) + at2*_Complex_I;
}
}
return;
}
/*--------------------------------------------------------------------*/
void cppfft2rm3xy(float complex g[], int isign, int mixup[],
                  float complex sct[], int indx, int indy, int kstrt,
                  int kxpi, int kxpp, int nyv, int kxp, int nxhyd,
                  int nxyhd) {
/* this subroutine performs the y part of 3 two dimensional real to
   complex fast fourier transforms and their inverses, for a subset of x,
   using complex arithmetic, with OpenMP,
   for data which is distributed in blocks
   for isign = (-1,1), input: all, output: g
   for isign = -1, approximate flop count: N*(5*log2(N) + 10)/nvp
   for isign = 1, approximate flop count: N*(5*log2(N) + 8)/nvp
   where N = (nx/2)*ny, and nvp = number of procs
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, an inverse fourier transform is performed
   g[n][m][0:2] = sum(g[j][k][0:2]*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, a forward fourier transform is performed
   g[j][k][0:2] = sum(g[n][m][0:2]*exp(sqrt(-1)*2pi*m*k/ny))
   kstrt = starting data block number
   kxpi = initial x index used
   kxpp = number of x indices used
   nyv = first dimension of g
   kxp = number of data values per block in x
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nxhyd = maximum of (nx/2,ny)
   nxyhd = one half of maximum of (nx,ny)
   the real data is stored in a complex array of length nx/2, ny
   with the odd/even x points stored in the real/imaginary parts.
   in complex notation, fourier coefficients are stored as follows:
   g[j][k][0:2] = mode jj,k, where jj = j + kxp*(kstrt - 1)
   0 <= jj < nx/2 and 0 <= k < ny, except for
   g[0][k][0:2] = mode nx/2,k, where ny/2+1 <= k < ny, and
   imaginary part of g[0][0][0:2] = real part of mode nx/2,0 and
   imaginary part of g[0][ny/2][0:2] = real part of mode nx/2,ny/2
   on node kstrt=0
   written by viktor k. decyk, ucla
   parallel, RISC optimized version
local data */
   int indx1, indx1y, nx, nxh, ny, nyh;
   int nxy, nxhy, ks, kxpt, j, k, nry;
   int i, m, ns, ns2, km, kmr, k1, k2, j1, j2, nryb, koff;
   float complex s, t1, t2, t3;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   nxh = nx/2;
   ny = 1L<<indy;
   nyh = ny/2;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   ks = kstrt - 1;
   kxpt = kxpi + kxpp - 1;
/* this block owns no x data: nothing to do */
   if (kstrt > nxh)
      return;
   if (isign > 0)
      goto L80;
/* inverse fourier transform */
/* nryb/nry = strides into the bit-reversal and twiddle tables, which */
/* are sized for max(nx/2,ny) rather than for ny itself               */
   nryb = nxhy/ny;
   nry = nxy/ny;
#pragma omp parallel for \
private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,koff,s,t1,t2,t3)
   for (i = kxpi-1; i < kxpt; i++) {
      koff = 3*nyv*i;
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         k1 = (mixup[k] - 1)/nryb;
/* swap only when k < k1 so each pair is exchanged exactly once */
         if (k < k1) {
            t1 = g[3*k1+koff];
            t2 = g[1+3*k1+koff];
            t3 = g[2+3*k1+koff];
            g[3*k1+koff] = g[3*k+koff];
            g[1+3*k1+koff] = g[1+3*k+koff];
            g[2+3*k1+koff] = g[2+3*k+koff];
            g[3*k+koff] = t1;
            g[1+3*k+koff] = t2;
            g[2+3*k+koff] = t3;
         }
      }
/* then transform in y */
      ns = 1;
      for (m = 0; m < indy; m++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
/* radix-2 butterfly applied to all 3 field components at once */
               s = sct[kmr*j];
               t1 = s*g[3*j2+koff];
               t2 = s*g[1+3*j2+koff];
               t3 = s*g[2+3*j2+koff];
               g[3*j2+koff] = g[3*j1+koff] - t1;
               g[1+3*j2+koff] = g[1+3*j1+koff] - t2;
               g[2+3*j2+koff] = g[2+3*j1+koff] - t3;
               g[3*j1+koff] += t1;
               g[1+3*j1+koff] += t2;
               g[2+3*j1+koff] += t3;
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
/* only the block that owns kx = 0 (ks==0, kxpi==1) performs this step */
   if ((ks==0) && (kxpi==1)) {
      for (k = 1; k < nyh; k++) {
         for (j = 0; j < 3; j++) {
            s = g[j+3*(ny-k)];
            g[j+3*(ny-k)] = 0.5*(cimagf(g[j+3*k] + s)
                           + crealf(g[j+3*k] - s)*_Complex_I);
            g[j+3*k] = 0.5*(crealf(g[j+3*k] + s)
                      + cimagf(g[j+3*k] - s)*_Complex_I);
         }
      }
   }
   return;
/* forward fourier transform */
L80: nryb = nxhy/ny;
   nry = nxy/ny;
/* scramble modes kx = 0, nx/2: inverse of the unscramble step above */
   if ((ks==0) && (kxpi==1)) {
      for (k = 1; k < nyh; k++) {
         for (j = 0; j < 3; j++) {
            s = cimagf(g[j+3*(ny-k)])
                + crealf(g[j+3*(ny-k)])*_Complex_I;
            g[j+3*(ny-k)] = conjf(g[j+3*k] - s);
            g[j+3*k] += s;
         }
      }
   }
#pragma omp parallel for \
private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,koff,s,t1,t2,t3)
   for (i = kxpi-1; i < kxpt; i++) {
      koff = 3*nyv*i;
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            t1 = g[3*k1+koff];
            t2 = g[1+3*k1+koff];
            t3 = g[2+3*k1+koff];
            g[3*k1+koff] = g[3*k+koff];
            g[1+3*k1+koff] = g[1+3*k+koff];
            g[2+3*k1+koff] = g[2+3*k+koff];
            g[3*k+koff] = t1;
            g[1+3*k+koff] = t2;
            g[2+3*k+koff] = t3;
         }
      }
/* then transform in y */
      ns = 1;
      for (m = 0; m < indy; m++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
/* forward transform uses conjugated twiddle factors */
               s = conjf(sct[kmr*j]);
               t1 = s*g[3*j2+koff];
               t2 = s*g[1+3*j2+koff];
               t3 = s*g[2+3*j2+koff];
               g[3*j2+koff] = g[3*j1+koff] - t1;
               g[1+3*j2+koff] = g[1+3*j1+koff] - t2;
               g[2+3*j2+koff] = g[2+3*j1+koff] - t3;
               g[3*j1+koff] += t1;
               g[1+3*j1+koff] += t2;
               g[2+3*j1+koff] += t3;
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cmppswapc2n(float f[], float s[], int isign, int nxh, int kypi,
                 int kypt, int nxvh, int kypd, int ndim) {
/* this subroutine swaps components for multiple ffts
   for isign = -1 the ndim interleaved components at each pair of
   adjacent x points are regrouped so each component forms one complex
   value; for isign = 1 the inverse permutation is applied
   f = input array
   s = scratch array (at least as large as the region of f used)
   isign = (-1,1) = swap (real-to-complex,complex-to-real)
   nxh = complex dimension in x direction
   kypi/kypt = initial/final y index used
   nxvh = half of the second dimension of f
   kypd = third dimension of f
   ndim = leading dimension of array f
local data */
   int i, j, k, ioff, nk;
/* swap complex components */
/* real to complex */
   if (isign < 0){
/* nk is written by every iteration and must be thread-private: it was */
/* missing from the private clause, making it a shared-variable data   */
/* race under OpenMP                                                   */
#pragma omp parallel for private(i,j,k,ioff,nk)
      for (k = kypi-1; k < kypt; k++) {
         nk = 2*ndim*nxvh*k;
/* gather: interleave even/odd x points per component into s */
         for (j = 0; j < nxh; j++) {
            ioff = 2*ndim*j;
            for (i = 0; i < ndim; i++) {
               s[2*i+ioff+nk] = f[i+ndim*(2*j)+nk];
               s[2*i+ioff+1+nk] = f[i+ndim*(2*j+1)+nk];
            }
         }
/* scatter the regrouped data back into f */
         for (j = 0; j < nxh; j++) {
            ioff = 2*ndim*j;
            for (i = 0; i < ndim; i++) {
               f[i+ndim*(2*j)+nk] = s[i+ioff+nk];
            }
            ioff += ndim;
            for (i = 0; i < ndim; i++) {
               f[i+ndim*(2*j+1)+nk] = s[i+ioff+nk];
            }
         }
      }
   }
/* complex to real */
   else if (isign > 0) {
/* nk added to the private clause for the same reason as above */
#pragma omp parallel for private(i,j,k,ioff,nk)
      for (k = kypi-1; k < kypt; k++) {
         nk = 2*ndim*nxvh*k;
/* gather: copy the two halves of each group into s */
         for (j = 0; j < nxh; j++) {
            ioff = 2*ndim*j;
            for (i = 0; i < ndim; i++) {
               s[i+ioff+nk] = f[i+ndim*(2*j)+nk];
            }
            ioff += ndim;
            for (i = 0; i < ndim; i++) {
               s[i+ioff+nk] = f[i+ndim*(2*j+1)+nk];
            }
         }
/* scatter: de-interleave back to component-major order in f */
         for (j = 0; j < nxh; j++) {
            ioff = 2*ndim*j;
            for (i = 0; i < ndim; i++) {
               f[i+ndim*(2*j)+nk] = s[2*i+ioff+nk];
               f[i+ndim*(2*j+1)+nk] = s[2*i+ioff+1+nk];
            }
         }
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppfft2rmnxx(float complex f[], float complex ss[], int isign,
                  int mixup[], float complex sct[], int indx, int indy,
                  int kstrt, int kypi, int kypp, int nxvh, int kypd,
                  int ndim, int nxhyd, int nxyhd) {
/* this subroutine performs the x part of N two dimensional real to
   complex fast fourier transforms and their inverses, for a subset of y,
   using complex arithmetic, where N = ndim, with OpenMP,
   for data which is distributed in blocks
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: M*(5*log2(M) + 10)/nvp
   for isign = 1, approximate flop count: M*(5*log2(M) + 8)/nvp
   where M = (nx/2)*ny, and nvp = number of procs
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, an inverse fourier transform is performed
   f[m][n][0:2] = (1/nx*ny)*sum(f[k][j][0:2]*exp(-sqrt(-1)*2pi*n*j/nx)
   if isign = 1, a forward fourier transform is performed
   f[k][j][0:2] = sum(f[m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)
   kstrt = starting data block number
   kypi = initial y index used
   kypp = number of y indices used
   nxvh = second dimension of f
   kypd = third dimension of f
   ss = scratch array
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   ndim = leading dimension of arrays f and g
   nxhyd = maximum of (nx/2,ny)
   nxyhd = one half of maximum of (nx,ny)
   the real data is stored in a complex array of length nx/2, ny
   with the odd/even x points stored in the real/imaginary parts.
   in complex notation, fourier coefficients are stored as follows:
   f[k][j][0:N-1] = mode j,kk, where kk = k + kyp*(kstrt - 1)
   0 <= j < nx/2 and 0 <= kk < ny, except for
   f[k][0][0:N-1] = mode nx/2,kk, where ny/2+1 <= kk < ny, and
   imaginary part of f[0][0][0:N-1] = real part of mode nx/2,0
   on mode kstrt=0
   imaginary part of f[0][0][0:N-1] = real part of mode nx/2,ny/2
   on mode kstrt=(ny/2)/kyp
   written by viktor k. decyk, ucla
   parallel, RISC optimized version
local data */
   int indx1, indx1y, nx, nxh, nxhh, ny;
   int nxy, nxhy, kypt, j, k, nrx;
   int i, m, ns, ns2, km, kmr, k1, k2, j1, j2, jj, nrxb, joff;
   float ani;
   float complex s, t, t1;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   nxh = nx/2;
   nxhh = nx/4;
   ny = 1L<<indy;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   kypt = kypi + kypp - 1;
/* this block owns no y data: nothing to do */
   if (kstrt > ny)
      return;
   if (isign > 0)
      goto L110;
/* inverse fourier transform */
/* ani = 1/(nx*ny) normalization, with an extra factor 1/2 from the */
/* real-to-complex packing (two real points per complex element)    */
   ani = 0.5/(((float) nx)*((float) ny));
   nrxb = nxhy/nxh;
   nrx = nxy/nxh;
/* swap complex components */
   cmppswapc2n((float *)f,(float *)ss,isign,nxh,kypi,kypt,nxvh,kypd,
               ndim);
#pragma omp parallel for \
private(i,j,k,m,ns,ns2,km,kmr,k1,k2,jj,j1,j2,joff,s,t,t1)
   for (i = kypi-1; i < kypt; i++) {
      joff = ndim*nxvh*i;
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
/* swap only when j < j1 so each pair is exchanged exactly once */
         if (j < j1) {
            for (jj = 0; jj < ndim; jj++) {
               t1 = f[jj+ndim*j1+joff];
               f[jj+ndim*j1+joff] = f[jj+ndim*j+joff];
               f[jj+ndim*j+joff] = t1;
            }
         }
      }
/* then transform in x */
      ns = 1;
      for (m = 0; m < indx1; m++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
/* radix-2 butterfly applied to all ndim components at once */
               s = sct[kmr*j];
               for (jj = 0; jj < ndim; jj++) {
                  t1 = s*f[jj+ndim*j2+joff];
                  f[jj+ndim*j2+joff] = f[jj+ndim*j1+joff] - t1;
                  f[jj+ndim*j1+joff] += t1;
               }
            }
         }
         ns = ns2;
      }
/* unscramble coefficients and normalize */
      kmr = nxy/nx;
      for (j = 1; j < nxhh; j++) {
         t1 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
         for (k = 0; k < ndim; k++) {
            t = conjf(f[k+ndim*(nxh-j)+joff]);
            s = f[k+ndim*j+joff] + t;
            t = (f[k+ndim*j+joff] - t)*t1;
            f[k+ndim*j+joff] = ani*(s + t);
            f[k+ndim*(nxh-j)+joff] = ani*conjf(s - t);
         }
      }
/* modes j = 0 and j = nx/4 are special cases of the unscrambling */
      for (k = 0; k < ndim; k++) {
         f[k+joff] = 2.0*ani*((crealf(f[k+joff]) + cimagf(f[k+joff]))
                     + (crealf(f[k+joff]) - cimagf(f[k+joff]))*_Complex_I);
         if (nxhh > 0)
            f[k+ndim*nxhh+joff] = 2.0*ani*conjf(f[k+ndim*nxhh+joff]);
      }
   }
   return;
/* forward fourier transform */
L110: nrxb = nxhy/nxh;
   nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,m,ns,ns2,km,kmr,k1,k2,jj,j1,j2,joff,s,t,t1)
   for (i = kypi-1; i < kypt; i++) {
      joff = ndim*nxvh*i;
/* scramble coefficients: inverse of the unscramble step above */
      kmr = nxy/nx;
      for (j = 1; j < nxhh; j++) {
         t1 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
         for (k = 0; k < ndim; k++) {
            t = conjf(f[k+ndim*(nxh-j)+joff]);
            s = f[k+ndim*j+joff] + t;
            t = (f[k+ndim*j+joff] - t)*t1;
            f[k+ndim*j+joff] = s + t;
            f[k+ndim*(nxh-j)+joff] = conjf(s - t);
         }
      }
      for (k = 0; k < ndim; k++) {
         f[k+joff] = (crealf(f[k+joff]) + cimagf(f[k+joff]))
                     + (crealf(f[k+joff]) - cimagf(f[k+joff]))*_Complex_I;
         if (nxhh > 0)
            f[k+ndim*nxhh+joff] = 2.0*conjf(f[k+ndim*nxhh+joff]);
      }
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            for (jj = 0; jj < ndim; jj++) {
               t1 = f[jj+ndim*j1+joff];
               f[jj+ndim*j1+joff] = f[jj+ndim*j+joff];
               f[jj+ndim*j+joff] = t1;
            }
         }
      }
/* then transform in x */
      ns = 1;
      for (m = 0; m < indx1; m++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
/* forward transform uses conjugated twiddle factors */
               s = conjf(sct[kmr*j]);
               for (jj = 0; jj < ndim; jj++) {
                  t1 = s*f[jj+ndim*j2+joff];
                  f[jj+ndim*j2+joff] = f[jj+ndim*j1+joff] - t1;
                  f[jj+ndim*j1+joff] += t1;
               }
            }
         }
         ns = ns2;
      }
   }
/* swap complex components */
   cmppswapc2n((float *)f,(float *)ss,isign,nxh,kypi,kypt,nxvh,kypd,
               ndim);
   return;
}
/*--------------------------------------------------------------------*/
void cppfft2rmnxy(float complex g[], int isign, int mixup[],
                  float complex sct[], int indx, int indy, int kstrt,
                  int kxpi, int kxpp, int nyv, int kxp, int ndim,
                  int nxhyd, int nxyhd) {
/* this subroutine performs the y part of N two dimensional real to
   complex fast fourier transforms and their inverses, for a subset of x,
   using complex arithmetic, where N = ndim, with OpenMP,
   for data which is distributed in blocks
   for isign = (-1,1), input: all, output: g
   for isign = -1, approximate flop count: M*(5*log2(M) + 10)/nvp
   for isign = 1, approximate flop count: M*(5*log2(M) + 8)/nvp
   where M = (nx/2)*ny, and nvp = number of procs
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, an inverse fourier transform is performed
   g[n][m][0:N-1] = sum(g[j][k][0:N-1]*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, a forward fourier transform is performed
   g[j][k][0:N-1] = sum(g[n][m][0:N-1]*exp(sqrt(-1)*2pi*m*k/ny))
   kstrt = starting data block number
   kxpi = initial x index used
   kxpp = number of x indices used
   nyv = first dimension of g
   kxp = number of data values per block in x
   ndim = leading dimension of arrays f and g
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nxhyd = maximum of (nx/2,ny)
   nxyhd = one half of maximum of (nx,ny)
   the real data is stored in a complex array of length nx/2, ny
   with the odd/even x points stored in the real/imaginary parts.
   in complex notation, fourier coefficients are stored as follows:
   g[j][k][0:N-1] = mode jj,k, where jj = j + kxp*(kstrt - 1)
   0 <= jj < nx/2 and 0 <= k < ny, except for
   g[0][k][0:N-1] = mode nx/2,k, where ny/2+1 <= k < ny, and
   imaginary part of g[0][0][0:N-1] = real part of mode nx/2,0 and
   imaginary part of g[0][ny/2][0:N-1] = real part of mode nx/2,ny/2
   on node kstrt=0
   written by viktor k. decyk, ucla
   parallel, RISC optimized version
local data */
   int indx1, indx1y, nx, nxh, ny, nyh;
   int nxy, nxhy, ks, kxpt, j, k, nry;
   int i, m, ns, ns2, km, kmr, k1, k2, j1, j2, jj, nryb, koff;
   float complex s, t1;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   nxh = nx/2;
   ny = 1L<<indy;
   nyh = ny/2;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   ks = kstrt - 1;
   kxpt = kxpi + kxpp - 1;
/* this block owns no x data: nothing to do */
   if (kstrt > nxh)
      return;
   if (isign > 0)
      goto L100;
/* inverse fourier transform */
/* nryb/nry = strides into the bit-reversal and twiddle tables, which */
/* are sized for max(nx/2,ny) rather than for ny itself               */
   nryb = nxhy/ny;
   nry = nxy/ny;
#pragma omp parallel for \
private(i,j,k,m,ns,ns2,km,kmr,k1,k2,jj,j1,j2,koff,s,t1)
   for (i = kxpi-1; i < kxpt; i++) {
      koff = ndim*nyv*i;
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         k1 = (mixup[k] - 1)/nryb;
/* swap only when k < k1 so each pair is exchanged exactly once */
         if (k < k1) {
            for (jj = 0; jj < ndim; jj++) {
               t1 = g[jj+ndim*k1+koff];
               g[jj+ndim*k1+koff] = g[jj+ndim*k+koff];
               g[jj+ndim*k+koff] = t1;
            }
         }
      }
/* then transform in y */
      ns = 1;
      for (m = 0; m < indy; m++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
/* radix-2 butterfly applied to all ndim components at once */
               s = sct[kmr*j];
               for (jj = 0; jj < ndim; jj++) {
                  t1 = s*g[jj+ndim*j2+koff];
                  g[jj+ndim*j2+koff] = g[jj+ndim*j1+koff] - t1;
                  g[jj+ndim*j1+koff] += t1;
               }
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
/* only the block that owns kx = 0 (ks==0, kxpi==1) performs this step */
   if ((ks==0) && (kxpi==1)) {
      for (k = 1; k < nyh; k++) {
         for (j = 0; j < ndim; j++) {
            s = g[j+ndim*(ny-k)];
            g[j+ndim*(ny-k)] = 0.5*(cimagf(g[j+ndim*k] + s)
                             + crealf(g[j+ndim*k] - s)*_Complex_I);
            g[j+ndim*k] = 0.5*(crealf(g[j+ndim*k] + s)
                        + cimagf(g[j+ndim*k] - s)*_Complex_I);
         }
      }
   }
   return;
/* forward fourier transform */
L100: nryb = nxhy/ny;
   nry = nxy/ny;
/* scramble modes kx = 0, nx/2: inverse of the unscramble step above */
   if ((ks==0) && (kxpi==1)) {
      for (k = 1; k < nyh; k++) {
         for (j = 0; j < ndim; j++) {
            s = cimagf(g[j+ndim*(ny-k)])
                + crealf(g[j+ndim*(ny-k)])*_Complex_I;
            g[j+ndim*(ny-k)] = conjf(g[j+ndim*k] - s);
            g[j+ndim*k] += s;
         }
      }
   }
#pragma omp parallel for \
private(i,j,k,m,ns,ns2,km,kmr,k1,k2,jj,j1,j2,koff,s,t1)
   for (i = kxpi-1; i < kxpt; i++) {
      koff = ndim*nyv*i;
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            for (jj = 0; jj < ndim; jj++) {
               t1 = g[jj+ndim*k1+koff];
               g[jj+ndim*k1+koff] = g[jj+ndim*k+koff];
               g[jj+ndim*k+koff] = t1;
            }
         }
      }
/* then transform in y */
      ns = 1;
      for (m = 0; m < indy; m++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
/* forward transform uses conjugated twiddle factors */
               s = conjf(sct[kmr*j]);
               for (jj = 0; jj < ndim; jj++) {
                  t1 = s*g[jj+ndim*j2+koff];
                  g[jj+ndim*j2+koff] = g[jj+ndim*j1+koff] - t1;
                  g[jj+ndim*j1+koff] += t1;
               }
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cwppfft2rm(float complex f[], float complex g[],
                float complex bs[], float complex br[], int isign,
                int ntpose, int mixup[], float complex sct[],
                float *ttp, int indx, int indy, int kstrt, int nvp,
                int nxvh, int nyv, int kxp, int kyp, int kypd,
                int nxhyd, int nxyhd) {
/* wrapper function for parallel real to complex fft */
/* parallelized with OpenMP */
/* isign < 0: inverse fft = x transform, transpose, y transform */
/* isign > 0: forward fft = y transform, transpose, x transform */
/* ntpose = 0: the extra (final/initial) transpose is also done here */
/* and its time tf is accumulated into *ttp at the end               */
/* local data */
   int nxh, ny, ks, kxpp, kypp;
   static int kxpi = 1, kypi = 1;
/* tf must be initialized: when isign == 0 neither branch below runs, */
/* yet tf would still be added to *ttp if ntpose == 0 (was UB)        */
   float tf = 0.0f;
   double dtime;
/* calculate range of indices */
   nxh = 1L<<(indx - 1);
   ny = 1L<<indy;
   ks = kstrt - 1;
/* kxpp/kypp = number of x/y indices owned by this block, clipped to */
/* [0,kxp] and [0,kyp] respectively                                  */
   kxpp = nxh - kxp*ks;
   kxpp = 0 > kxpp ? 0 : kxpp;
   kxpp = kxp < kxpp ? kxp : kxpp;
   kypp = ny - kyp*ks;
   kypp = 0 > kypp ? 0 : kypp;
   kypp = kyp < kypp ? kyp : kypp;
/* inverse fourier transform */
   if (isign < 0) {
/* perform x fft */
      cppfft2rmxx(f,isign,mixup,sct,indx,indy,kstrt,kypi,kypp,nxvh,kypd,
                  nxhyd,nxyhd);
/* transpose f array to g */
      cpwtimera(-1,ttp,&dtime);
      cpptpose(f,g,bs,br,nxh,ny,kxp,kyp,kstrt,nvp,nxvh,nyv,kxp,kypd);
      cpwtimera(1,ttp,&dtime);
/* perform y fft */
      cppfft2rmxy(g,isign,mixup,sct,indx,indy,kstrt,kxpi,kxpp,nyv,kxp,
                  nxhyd,nxyhd);
/* transpose g array to f */
      if (ntpose==0) {
         cpwtimera(-1,&tf,&dtime);
         cpptpose(g,f,br,bs,ny,nxh,kyp,kxp,kstrt,nvp,nyv,nxvh,kypd,kxp);
         cpwtimera(1,&tf,&dtime);
      }
   }
/* forward fourier transform */
   else if (isign > 0) {
/* transpose f array to g */
      if (ntpose==0) {
         cpwtimera(-1,&tf,&dtime);
         cpptpose(f,g,bs,br,nxh,ny,kxp,kyp,kstrt,nvp,nxvh,nyv,kxp,kypd);
         cpwtimera(1,&tf,&dtime);
      }
/* perform y fft */
      cppfft2rmxy(g,isign,mixup,sct,indx,indy,kstrt,kxpi,kxpp,nyv,kxp,
                  nxhyd,nxyhd);
/* transpose g array to f */
      cpwtimera(-1,ttp,&dtime);
      cpptpose(g,f,br,bs,ny,nxh,kyp,kxp,kstrt,nvp,nyv,nxvh,kypd,kxp);
      cpwtimera(1,ttp,&dtime);
/* perform x fft */
      cppfft2rmxx(f,isign,mixup,sct,indx,indy,kstrt,kypi,kypp,nxvh,kypd,
                  nxhyd,nxyhd);
   }
/* accumulate the time of the extra transpose */
   if (ntpose==0)
      *ttp += tf;
   return;
}
/*--------------------------------------------------------------------*/
void cwppfft2rm3(float complex f[], float complex g[],
                 float complex bs[], float complex br[], int isign,
                 int ntpose, int mixup[], float complex sct[],
                 float *ttp, int indx, int indy, int kstrt, int nvp,
                 int nxvh, int nyv, int kxp, int kyp, int kypd,
                 int nxhyd, int nxyhd) {
/* wrapper function for parallel real to complex fft */
/* for 3-component (vector) fields; parallelized with OpenMP */
/* isign < 0: inverse fft = x transform, transpose, y transform */
/* isign > 0: forward fft = y transform, transpose, x transform */
/* ntpose = 0: the extra (final/initial) transpose is also done here */
/* and its time tf is accumulated into *ttp at the end               */
/* local data */
   int nxh, ny, ks, kxpp, kypp;
   static int kxpi = 1, kypi = 1;
/* tf must be initialized: when isign == 0 neither branch below runs, */
/* yet tf would still be added to *ttp if ntpose == 0 (was UB)        */
   float tf = 0.0f;
   double dtime;
/* calculate range of indices */
   nxh = 1L<<(indx - 1);
   ny = 1L<<indy;
   ks = kstrt - 1;
/* kxpp/kypp = number of x/y indices owned by this block, clipped to */
/* [0,kxp] and [0,kyp] respectively                                  */
   kxpp = nxh - kxp*ks;
   kxpp = 0 > kxpp ? 0 : kxpp;
   kxpp = kxp < kxpp ? kxp : kxpp;
   kypp = ny - kyp*ks;
   kypp = 0 > kypp ? 0 : kypp;
   kypp = kyp < kypp ? kyp : kypp;
/* inverse fourier transform */
   if (isign < 0) {
/* perform x fft */
      cppfft2rm3xx(f,isign,mixup,sct,indx,indy,kstrt,kypi,kypp,nxvh,
                   kypd,nxhyd,nxyhd);
/* transpose f array to g */
      cpwtimera(-1,ttp,&dtime);
      cppntpose(f,g,bs,br,nxh,ny,kxp,kyp,kstrt,nvp,3,nxvh,nyv,kxp,kypd);
      cpwtimera(1,ttp,&dtime);
/* perform y fft */
      cppfft2rm3xy(g,isign,mixup,sct,indx,indy,kstrt,kxpi,kxpp,nyv,kxp,
                   nxhyd,nxyhd);
/* transpose g array to f */
      if (ntpose==0) {
         cpwtimera(-1,&tf,&dtime);
         cppntpose(g,f,br,bs,ny,nxh,kyp,kxp,kstrt,nvp,3,nyv,nxvh,kypd,
                   kxp);
         cpwtimera(1,&tf,&dtime);
      }
   }
/* forward fourier transform */
   else if (isign > 0) {
/* transpose f array to g */
      if (ntpose==0) {
         cpwtimera(-1,&tf,&dtime);
         cppntpose(f,g,bs,br,nxh,ny,kxp,kyp,kstrt,nvp,3,nxvh,nyv,kxp,
                   kypd);
         cpwtimera(1,&tf,&dtime);
      }
/* perform y fft */
      cppfft2rm3xy(g,isign,mixup,sct,indx,indy,kstrt,kxpi,kxpp,nyv,kxp,
                   nxhyd,nxyhd);
/* transpose g array to f */
      cpwtimera(-1,ttp,&dtime);
      cppntpose(g,f,br,bs,ny,nxh,kyp,kxp,kstrt,nvp,3,nyv,nxvh,kypd,kxp);
      cpwtimera(1,ttp,&dtime);
/* perform x fft */
      cppfft2rm3xx(f,isign,mixup,sct,indx,indy,kstrt,kypi,kypp,nxvh,
                   kypd,nxhyd,nxyhd);
   }
/* accumulate the time of the extra transpose */
   if (ntpose==0)
      *ttp += tf;
   return;
}
/*--------------------------------------------------------------------*/
void cwppfft2rmn(float complex f[], float complex g[],
                 float complex bs[], float complex br[],
                 float complex ss[], int isign, int ntpose, int mixup[],
                 float complex sct[], float *ttp, int indx, int indy,
                 int kstrt, int nvp, int nxvh, int nyv, int kxp,
                 int kyp, int kypd, int ndim, int nxhyd, int nxyhd) {
/* wrapper function for parallel real to complex fft */
/* for ndim-component fields; ss is a scratch array for the component */
/* swap in the x pass; parallelized with OpenMP */
/* isign < 0: inverse fft = x transform, transpose, y transform */
/* isign > 0: forward fft = y transform, transpose, x transform */
/* ntpose = 0: the extra (final/initial) transpose is also done here */
/* and its time tf is accumulated into *ttp at the end               */
/* local data */
   int nxh, ny, ks, kxpp, kypp;
   static int kxpi = 1, kypi = 1;
/* tf must be initialized: when isign == 0 neither branch below runs, */
/* yet tf would still be added to *ttp if ntpose == 0 (was UB)        */
   float tf = 0.0f;
   double dtime;
/* calculate range of indices */
   nxh = 1L<<(indx - 1);
   ny = 1L<<indy;
   ks = kstrt - 1;
/* kxpp/kypp = number of x/y indices owned by this block, clipped to */
/* [0,kxp] and [0,kyp] respectively                                  */
   kxpp = nxh - kxp*ks;
   kxpp = 0 > kxpp ? 0 : kxpp;
   kxpp = kxp < kxpp ? kxp : kxpp;
   kypp = ny - kyp*ks;
   kypp = 0 > kypp ? 0 : kypp;
   kypp = kyp < kypp ? kyp : kypp;
/* inverse fourier transform */
   if (isign < 0) {
/* perform x fft */
      cppfft2rmnxx(f,ss,isign,mixup,sct,indx,indy,kstrt,kypi,kypp,nxvh,
                   kypd,ndim,nxhyd,nxyhd);
/* transpose f array to g */
      cpwtimera(-1,ttp,&dtime);
      cppntpose(f,g,bs,br,nxh,ny,kxp,kyp,kstrt,nvp,ndim,nxvh,nyv,kxp,
                kypd);
      cpwtimera(1,ttp,&dtime);
/* perform y fft */
      cppfft2rmnxy(g,isign,mixup,sct,indx,indy,kstrt,kxpi,kxpp,nyv,kxp,
                   ndim,nxhyd,nxyhd);
/* transpose g array to f */
      if (ntpose==0) {
         cpwtimera(-1,&tf,&dtime);
         cppntpose(g,f,br,bs,ny,nxh,kyp,kxp,kstrt,nvp,ndim,nyv,nxvh,
                   kypd,kxp);
         cpwtimera(1,&tf,&dtime);
      }
   }
/* forward fourier transform */
   else if (isign > 0) {
/* transpose f array to g */
      if (ntpose==0) {
         cpwtimera(-1,&tf,&dtime);
         cppntpose(f,g,bs,br,nxh,ny,kxp,kyp,kstrt,nvp,ndim,nxvh,nyv,kxp,
                   kypd);
         cpwtimera(1,&tf,&dtime);
      }
/* perform y fft */
      cppfft2rmnxy(g,isign,mixup,sct,indx,indy,kstrt,kxpi,kxpp,nyv,kxp,
                   ndim,nxhyd,nxyhd);
/* transpose g array to f */
      cpwtimera(-1,ttp,&dtime);
      cppntpose(g,f,br,bs,ny,nxh,kyp,kxp,kstrt,nvp,ndim,nyv,nxvh,kypd,
                kxp);
      cpwtimera(1,ttp,&dtime);
/* perform x fft */
      cppfft2rmnxx(f,ss,isign,mixup,sct,indx,indy,kstrt,kypi,kypp,nxvh,
                   kypd,ndim,nxhyd,nxyhd);
   }
/* accumulate the time of the extra transpose */
   if (ntpose==0)
      *ttp += tf;
   return;
}
/*--------------------------------------------------------------------*/
void cpppcopyout(float part[], float ppart[], int kpic[], int *npp,
                 int npmax, int nppmx, int idimp, int mxyp1, int *irc) {
/* for 2d code, this subroutine copies segmented particle data ppart to
   the array part with original tiled layout
   spatial decomposition in y direction
   input: all except part, npp, irc, output: part, npp, irc
   part[j][i] = i-th coordinate for particle j
   ppart[k][j][i] = i-th coordinate for particle j in tile k
   kpic = number of particles per tile
   npp = number of particles in partition
   npmax = maximum number of particles in each partition
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 5
   mxyp1 = total number of tiles in partition
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int tile, p, c, dest, count, excess, overflow;
   dest = 0;
   overflow = 0;
/* walk the tiles in order, appending each tile's particles to part */
   for (tile = 0; tile < mxyp1; tile++) {
      count = kpic[tile];
/* how far this tile would run past the end of part, if at all */
      excess = (count + dest) - npmax;
      if (excess > 0)
         overflow = overflow > excess ? overflow : excess;
/* once any overflow has been detected, copying stops for good */
      if (overflow > 0)
         count = 0;
/* copy all idimp coordinates of every particle in this tile */
      for (p = 0; p < count; p++) {
         for (c = 0; c < idimp; c++) {
            part[c+idimp*(p+dest)] = ppart[c+idimp*(p+nppmx*tile)];
         }
      }
      dest += count;
   }
   *npp = dest;
   if (overflow > 0)
      *irc = overflow;
   return;
}
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
void cpdicomp2l_(float *edges, int *nyp, int *noff, int *nypmx,
                 int *nypmn, int *ny, int *kstrt, int *nvp, int *idps) {
/* Fortran interface: Fortran passes by reference, so scalar inputs */
/* are dereferenced before forwarding to cpdicomp2l                 */
   cpdicomp2l(edges,nyp,noff,nypmx,nypmn,*ny,*kstrt,*nvp,*idps);
   return;
}
/*--------------------------------------------------------------------*/
void cpdistr2h_(float *part, float *edges, int *npp, int *nps,
                float *vtx, float *vty, float *vtz, float *vdx,
                float *vdy, float *vdz, int *npx, int *npy, int *nx,
                int *ny, int *idimp, int *npmax, int *idps, int *ipbc,
                int *ierr) {
/* Fortran interface: dereferences scalar arguments and forwards to */
/* cpdistr2h                                                        */
   cpdistr2h(part,edges,npp,*nps,*vtx,*vty,*vtz,*vdx,*vdy,*vdz,*npx,
             *npy,*nx,*ny,*idimp,*npmax,*idps,*ipbc,ierr);
   return;
}
/*--------------------------------------------------------------------*/
void cppdblkp2l_(float *part, int *kpic, int *npp, int *noff,
                 int *nppmx, int *idimp, int *npmax, int *mx, int *my,
                 int *mx1,int *mxyp1, int *irc) {
/* Fortran interface: dereferences scalar arguments and forwards to */
/* cppdblkp2l                                                       */
   cppdblkp2l(part,kpic,*npp,*noff,nppmx,*idimp,*npmax,*mx,*my,*mx1,
              *mxyp1,irc);
   return;
}
/*--------------------------------------------------------------------*/
void cpppmovin2l_(float *part, float *ppart, int *kpic, int *npp,
                  int *noff, int *nppmx, int *idimp, int *npmax,
                  int *mx, int *my, int *mx1, int *mxyp1, int *irc) {
/* Fortran interface: dereferences scalar arguments and forwards to */
/* cpppmovin2l                                                      */
   cpppmovin2l(part,ppart,kpic,*npp,*noff,*nppmx,*idimp,*npmax,*mx,*my,
               *mx1,*mxyp1,irc);
   return;
}
/*--------------------------------------------------------------------*/
void cpppcheck2l_(float *ppart, int *kpic, int *noff, int *nyp,
                  int *idimp, int *nppmx, int *nx, int *mx, int *my,
                  int *mx1, int *myp1, int *irc) {
/* Fortran interface: dereferences scalar arguments and forwards to */
/* cpppcheck2l                                                      */
   cpppcheck2l(ppart,kpic,*noff,*nyp,*idimp,*nppmx,*nx,*mx,*my,*mx1,
               *myp1,irc);
   return;
}
/*--------------------------------------------------------------------*/
void cppgbppush23l_(float *ppart, float *fxy, float *bxy, int *kpic,
                    int *noff, int *nyp, float *qbm, float *dt,
                    float *dtc, float *ek, int *idimp, int *nppmx,
                    int *nx, int *ny, int *mx, int *my, int *nxv,
                    int *nypmx, int *mx1, int *mxyp1, int *ipbc) {
/* Fortran interface: dereferences scalar arguments and forwards to */
/* cppgbppush23l                                                    */
   cppgbppush23l(ppart,fxy,bxy,kpic,*noff,*nyp,*qbm,*dt,*dtc,ek,*idimp,
                 *nppmx,*nx,*ny,*mx,*my,*nxv,*nypmx,*mx1,*mxyp1,*ipbc);
   return;
}
/*--------------------------------------------------------------------*/
void cppgbppushf23l_(float *ppart, float *fxy, float *bxy, int *kpic,
                     int *ncl, int *ihole, int *noff, int *nyp,
                     float *qbm, float *dt, float *dtc, float *ek,
                     int *idimp, int *nppmx, int *nx, int *ny,
                     int *mx, int *my, int *nxv, int *nypmx, int *mx1,
                     int *mxyp1, int *ntmax, int *irc) {
/* Fortran interface: dereferences scalar arguments and forwards to */
/* cppgbppushf23l                                                   */
   cppgbppushf23l(ppart,fxy,bxy,kpic,ncl,ihole,*noff,*nyp,*qbm,*dt,*dtc,
                  ek,*idimp,*nppmx,*nx,*ny,*mx,*my,*nxv,*nypmx,*mx1,
                  *mxyp1,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
void cppgppost2l_(float *ppart, float *q, int *kpic, int *noff,
                  float *qm, int *idimp, int *nppmx, int *mx, int *my,
                  int *nxv, int *nypmx, int *mx1, int *mxyp1) {
/* Fortran interface: dereferences scalar arguments and forwards to */
/* cppgppost2l                                                      */
   cppgppost2l(ppart,q,kpic,*noff, *qm,*idimp,*nppmx,*mx,*my,*nxv,
               *nypmx,*mx1,*mxyp1);
   return;
}
/*--------------------------------------------------------------------*/
void cppgjppost2l_(float *ppart, float *cu, int *kpic, int *noff,
                   float *qm, float *dt, int *nppmx, int *idimp,
                   int *nx, int *ny, int *mx, int *my, int *nxv,
                   int *nypmx, int *mx1, int *mxyp1, int *ipbc) {
/* Fortran interface: dereferences scalar arguments and forwards to */
/* cppgjppost2l                                                     */
   cppgjppost2l(ppart,cu,kpic,*noff,*qm,*dt,*nppmx,*idimp,*nx,*ny,*mx,
                *my,*nxv,*nypmx,*mx1,*mxyp1,*ipbc);
   return;
}
/*--------------------------------------------------------------------*/
void cppgmjppost2l_(float *ppart, float *amu, int *kpic, int *noff,
                    float *qm, int *nppmx, int *idimp, int *mx, int *my,
                    int *nxv, int *nypmx, int *mx1, int *mxyp1) {
/* Fortran interface: dereferences scalar arguments and forwards to */
/* cppgmjppost2l                                                    */
   cppgmjppost2l(ppart,amu,kpic,*noff,*qm,*nppmx,*idimp,*mx,*my,*nxv,
                 *nypmx,*mx1,*mxyp1);
   return;
}
/*--------------------------------------------------------------------*/
void cppgdjppost2l_(float *ppart, float *fxy, float *bxy, float *dcu,
                    float *amu, int *kpic, int *noff, int *nyp,
                    float *qm, float *qbm, float *dt, int *idimp,
                    int *nppmx, int *nx, int *mx, int *my, int *nxv,
                    int *nypmx, int *mx1, int *mxyp1) {
/* Fortran interface: dereferences scalar arguments and forwards to */
/* cppgdjppost2l                                                    */
   cppgdjppost2l(ppart,fxy,bxy,dcu,amu,kpic,*noff,*nyp,*qm,*qbm,*dt,
                 *idimp,*nppmx,*nx,*mx,*my,*nxv,*nypmx,*mx1,*mxyp1);
   return;
}
/*--------------------------------------------------------------------*/
void cppgdcjppost2l_(float *ppart, float *fxy, float *bxy, float *cu,
                     float *dcu, float *amu, int *kpic, int *noff,
                     int *nyp, float *qm, float *qbm, float *dt,
                     int *idimp, int *nppmx, int *nx, int *mx, int *my,
                     int *nxv, int *nypmx, int *mx1, int *mxyp1) {
/* Fortran interface: dereferences scalar arguments and forwards to */
/* cppgdcjppost2l                                                   */
   cppgdcjppost2l(ppart,fxy,bxy,cu,dcu,amu,kpic,*noff,*nyp,*qm,*qbm,*dt,
                  *idimp,*nppmx,*nx,*mx,*my,*nxv,*nypmx,*mx1,*mxyp1);
   return;
}
/*--------------------------------------------------------------------*/
void cppporder2la_(float *ppart, float *ppbuff, float *sbufl,
                   float *sbufr, int *kpic, int *ncl, int *ihole,
                   int *ncll, int *nclr, int *noff, int *nyp,
                   int *idimp, int *nppmx, int *nx, int *ny, int *mx,
                   int *my, int *mx1, int *myp1, int *npbmx, int *ntmax,
                   int *nbmax, int *irc) {
/* Fortran interface: dereferences scalar arguments and forwards to */
/* cppporder2la                                                     */
   cppporder2la(ppart,ppbuff,sbufl,sbufr,kpic,ncl,ihole,ncll,nclr,*noff,
                *nyp,*idimp,*nppmx,*nx,*ny,*mx,*my,*mx1,*myp1,*npbmx,
                *ntmax,*nbmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
void cppporderf2la_(float *ppart, float *ppbuff, float *sbufl,
float *sbufr, int *ncl, int *ihole, int *ncll,
int *nclr, int *idimp, int *nppmx, int *mx1,
int *myp1, int *npbmx, int *ntmax, int *nbmax,
int *irc) {
cppporderf2la(ppart,ppbuff,sbufl,sbufr,ncl,ihole,ncll,nclr,*idimp,
*nppmx,*mx1,*myp1,*npbmx,*ntmax,*nbmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void cppporder2lb_(float *ppart, float *ppbuff, float *rbufl,
float *rbufr, int *kpic, int *ncl, int *ihole,
int *mcll, int *mclr, int *idimp, int *nppmx,
int *mx1, int *myp1, int *npbmx, int *ntmax,
int *nbmax, int *irc) {
cppporder2lb(ppart,ppbuff,rbufl,rbufr,kpic,ncl,ihole,mcll,mclr,
*idimp,*nppmx,*mx1,*myp1,*npbmx,*ntmax,*nbmax,irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shims for the guard-cell and field routines: scalar
   arguments arrive by reference and are dereferenced before forwarding. */
void cppcguard2xl_(float *fxy, int *myp, int *nx, int *ndim, int *nxe,
                   int *nypmx) {
   cppcguard2xl(fxy,*myp,*nx,*ndim,*nxe,*nypmx);
}

/*--------------------------------------------------------------------*/
void cppaguard2xl_(float *q, int *myp, int *nx, int *nxe, int *nypmx) {
   cppaguard2xl(q,*myp,*nx,*nxe,*nypmx);
}

/*--------------------------------------------------------------------*/
void cppacguard2xl_(float *cu, int *myp, int *nx, int *ndim, int *nxe,
                    int *nypmx) {
   cppacguard2xl(cu,*myp,*nx,*ndim,*nxe,*nypmx);
}

/*--------------------------------------------------------------------*/
void cppascfguard2l_(float *dcu, float *cus, int *nyp, float *q2m0,
                     int *nx, int *nxe, int *nypmx) {
   cppascfguard2l(dcu,cus,*nyp,*q2m0,*nx,*nxe,*nypmx);
}

/*--------------------------------------------------------------------*/
void cppfwpminmx2_(float *qe, int *nyp, float *qbme, float *wpmax,
                   float *wpmin, int *nx, int *nxe, int *nypmx) {
/* wpmax/wpmin are outputs and stay pointers */
   cppfwpminmx2(qe,*nyp,*qbme,wpmax,wpmin,*nx,*nxe,*nypmx);
}

/*--------------------------------------------------------------------*/
void cmppois23_(float complex *q, float complex *fxy, int *isign,
                float complex *ffc, float *ax, float *ay, float *affp,
                float *we, int *nx, int *ny, int *kstrt, int *nyv,
                int *kxp, int *nyhd) {
/* we is an output and stays a pointer */
   cmppois23(q,fxy,*isign,ffc,*ax,*ay,*affp,we,*nx,*ny,*kstrt,
             *nyv,*kxp,*nyhd);
}

/*--------------------------------------------------------------------*/
void cmppcuperp2_(float complex *cu, int *nx, int *ny, int *kstrt,
                  int *nyv, int *kxp) {
   cmppcuperp2(cu,*nx,*ny,*kstrt,*nyv,*kxp);
}

/*--------------------------------------------------------------------*/
void cmppbbpoisp23_(float complex *cu, float complex *bxy,
                    float complex *ffc, float *ci, float *wm, int *nx,
                    int *ny, int *kstrt, int *nyv, int *kxp,
                    int *nyhd) {
   cmppbbpoisp23(cu,bxy,ffc,*ci,wm,*nx,*ny,*kstrt,*nyv,*kxp,*nyhd);
}

/*--------------------------------------------------------------------*/
void cppbaddext2_(float *bxy, int *nyp, float *omx, float *omy,
                  float *omz, int *nx, int *nxe, int *nypmx) {
   cppbaddext2(bxy,*nyp,*omx,*omy,*omz,*nx,*nxe,*nypmx);
}

/*--------------------------------------------------------------------*/
void cmppdcuperp23_(float complex *dcu, float complex *amu, int *nx,
                    int *ny, int *kstrt, int *nyv, int *kxp) {
   cmppdcuperp23(dcu,amu,*nx,*ny,*kstrt,*nyv,*kxp);
}

/*--------------------------------------------------------------------*/
void cmppadcuperp23_(float complex *dcu, float complex *amu, int *nx,
                     int *ny, int *kstrt, int *nyv, int *kxp) {
   cmppadcuperp23(dcu,amu,*nx,*ny,*kstrt,*nyv,*kxp);
}

/*--------------------------------------------------------------------*/
void cmppepoisp23_(float complex *dcu, float complex *exy, int *isign,
                   float complex *ffe, float *ax, float *ay,
                   float *affp, float *wp0, float *ci, float *wf,
                   int *nx, int *ny, int *kstrt, int *nyv, int *kxp,
                   int *nyhd) {
/* wf is an output and stays a pointer */
   cmppepoisp23(dcu,exy,*isign,ffe,*ax,*ay,*affp,*wp0,*ci,wf,
                *nx,*ny,*kstrt,*nyv,*kxp,*nyhd);
}

/*--------------------------------------------------------------------*/
void cppaddvrfield2_(float *a, float *b, float *c, int *ndim, int *nxe,
                     int *nypmx) {
   cppaddvrfield2(a,b,c,*ndim,*nxe,*nypmx);
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shims for the FFT routines: scalars are dereferenced,
   arrays and output arguments are forwarded as pointers.               */
void cwpfft2rinit_(int *mixup, float complex *sct, int *indx, int *indy,
                   int *nxhyd, int *nxyhd) {
   cwpfft2rinit(mixup,sct,*indx,*indy,*nxhyd,*nxyhd);
}

/*--------------------------------------------------------------------*/
void cppfft2rmxx_(float complex *f, int *isign, int *mixup,
                  float complex *sct, int *indx, int *indy, int *kstrt,
                  int *kypi, int *kypp, int *nxvh, int *kypd,
                  int *nxhyd, int *nxyhd) {
   cppfft2rmxx(f,*isign,mixup,sct,*indx,*indy,*kstrt,*kypi,*kypp,
               *nxvh,*kypd,*nxhyd,*nxyhd);
}

/*--------------------------------------------------------------------*/
void cppfft2rmxy_(float complex *g, int *isign, int *mixup,
                  float complex *sct, int *indx, int *indy, int *kstrt,
                  int *kxpi, int *kxpp, int *nyv, int *kxp, int *nxhyd,
                  int *nxyhd) {
   cppfft2rmxy(g,*isign,mixup,sct,*indx,*indy,*kstrt,*kxpi,*kxpp,
               *nyv,*kxp,*nxhyd,*nxyhd);
}

/*--------------------------------------------------------------------*/
void cppfft2rm3xx_(float complex *f, int *isign, int *mixup,
                   float complex *sct, int *indx, int *indy, int *kstrt,
                   int *kypi, int *kypp, int *nxvh, int *kypd,
                   int *nxhyd, int *nxyhd) {
   cppfft2rm3xx(f,*isign,mixup,sct,*indx,*indy,*kstrt,*kypi,*kypp,
                *nxvh,*kypd,*nxhyd,*nxyhd);
}

/*--------------------------------------------------------------------*/
void cppfft2rm3xy_(float complex *g, int *isign, int *mixup,
                   float complex *sct, int *indx, int *indy, int *kstrt,
                   int *kxpi, int *kxpp, int *nyv, int *kxp, int *nxhyd,
                   int *nxyhd) {
   cppfft2rm3xy(g,*isign,mixup,sct,*indx,*indy,*kstrt,*kxpi,*kxpp,
                *nyv,*kxp,*nxhyd,*nxyhd);
}

/*--------------------------------------------------------------------*/
void cppfft2rmnxx_(float complex *f, float complex *ss, int *isign,
                   int *mixup, float complex *sct, int *indx, int *indy,
                   int *kstrt, int *kypi, int *kypp, int *nxvh,
                   int *kypd, int *ndim, int *nxhyd, int *nxyhd) {
   cppfft2rmnxx(f,ss,*isign,mixup,sct,*indx,*indy,*kstrt,*kypi,
                *kypp,*nxvh,*kypd,*ndim,*nxhyd,*nxyhd);
}

/*--------------------------------------------------------------------*/
void cppfft2rmnxy_(float complex *g, int *isign, int *mixup,
                   float complex *sct, int *indx, int *indy, int *kstrt,
                   int *kxpi, int *kxpp, int *nyv, int *kxp, int *ndim,
                   int *nxhyd, int *nxyhd) {
   cppfft2rmnxy(g,*isign,mixup,sct,*indx,*indy,*kstrt,*kxpi,*kxpp,
                *nyv,*kxp,*ndim,*nxhyd,*nxyhd);
}

/*--------------------------------------------------------------------*/
void cwppfft2rm_(float complex *f, float complex *g, float complex *bs,
                 float complex *br, int *isign, int *ntpose, int *mixup,
                 float complex *sct, float *ttp, int *indx, int *indy,
                 int *kstrt, int *nvp, int *nxvh, int *nyv, int *kxp,
                 int *kyp, int *kypd, int *nxhyd, int *nxyhd) {
/* ttp is an output and stays a pointer */
   cwppfft2rm(f,g,bs,br,*isign,*ntpose,mixup,sct,ttp,*indx,*indy,
              *kstrt,*nvp,*nxvh,*nyv,*kxp,*kyp,*kypd,*nxhyd,*nxyhd);
}

/*--------------------------------------------------------------------*/
void cwppfft2rm3_(float complex *f, float complex *g, float complex *bs,
                  float complex *br, int *isign, int *ntpose,
                  int *mixup, float complex *sct, float *ttp, int *indx,
                  int *indy, int *kstrt, int *nvp, int *nxvh, int *nyv,
                  int *kxp, int *kyp, int *kypd, int *nxhyd,
                  int *nxyhd) {
   cwppfft2rm3(f,g,bs,br,*isign,*ntpose,mixup,sct,ttp,*indx,*indy,
               *kstrt,*nvp,*nxvh,*nyv,*kxp,*kyp,*kypd,*nxhyd,*nxyhd);
}

/*--------------------------------------------------------------------*/
void cwppfft2rmn_(float complex *f, float complex *g,
                  float complex *bs, float complex *br,
                  float complex *ss, int *isign, int *ntpose, int *mixup,
                  float complex *sct, float *ttp, int *indx, int *indy,
                  int *kstrt, int *nvp, int *nxvh, int *nyv, int *kxp,
                  int *kyp, int *kypd, int *ndim, int *nxhyd,
                  int *nxyhd) {
   cwppfft2rmn(f,g,bs,br,ss,*isign,*ntpose,mixup,sct,ttp,*indx,
               *indy,*kstrt,*nvp,*nxvh,*nyv,*kxp,*kyp,*kypd,*ndim,
               *nxhyd,*nxyhd);
}

/*--------------------------------------------------------------------*/
void cmppswapc2n_(float *f, float *s, int *isign, int *nxh, int *kypi,
                  int *kypt, int *nxvh, int *kypd, int *ndim) {
   cmppswapc2n(f,s,*isign,*nxh,*kypi,*kypt,*nxvh,*kypd,*ndim);
}
/* ===== end of previous file; begin file: utils.h ===== */
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file utils.h
* \brief Basic utilility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <algorithm>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <thread>
#include <type_traits>
#include <unordered_set>
#include <utility>
#include <vector>
#include "../operator/mxnet_op.h"
#if MXNET_USE_MKLDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
/*! \brief Return the id of the current process (Windows implementation). */
inline size_t current_process_id() { return ::GetCurrentProcessId(); }
#else
/*! \brief Return the id of the current process (POSIX implementation). */
inline size_t current_process_id() { return getpid(); }
#endif
/*!
* \brief IndPtr should be non-negative, in non-decreasing order, start with 0
* and end with value equal with size of indices.
*/
struct csr_indptr_check {
  /*!
   * \brief Kernel body run once per indptr interval i. Writes kCSRIndPtrErr
   * to *out when entry i violates a CSR invariant: a negative value, a
   * decreasing step, indptr[0] != 0, or indptr[end] != idx_size.
   * \param i       interval index (0 .. end-1)
   * \param out     single-element error flag, overwritten on failure
   * \param indptr  row-pointer array of length end+1
   * \param end     number of intervals (rows)
   * \param idx_size expected total number of indices
   */
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
                                  const nnvm::dim_t end, const nnvm::dim_t idx_size) {
    if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||
        (i == 0 && indptr[i] != 0) ||
        (i == end - 1 && indptr[end] != idx_size))
      *out = kCSRIndPtrErr;
  }
};
/*!
* \brief Indices should be non-negative, less than the number of columns
* and in ascending order per row.
*/
struct csr_idx_check {
  /*!
   * \brief Kernel body run once per row i. Writes kCSRIdxErr to *out when
   * any column index in row i is out of range [0, ncols) or the indices
   * within the row are not strictly ascending.
   * \param i      row index
   * \param out    single-element error flag, overwritten on failure
   * \param idx    column-index array
   * \param indptr row-pointer array delimiting row i as [indptr[i], indptr[i+1])
   * \param ncols  number of columns
   */
  template<typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const RType* indptr, const nnvm::dim_t ncols) {
    for (RType j = indptr[i]; j < indptr[i+1]; j++) {
      if (idx[j] >= ncols || idx[j] < 0 ||
          (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
        *out = kCSRIdxErr;
        break;
      }
    }
  }
};
/*!
* \brief Indices of RSPNDArray should be non-negative,
* less than the size of first dimension and in ascending order
*/
struct rsp_idx_check {
  /*!
   * \brief Kernel body run once per row index i. Writes kRSPIdxErr to *out
   * when idx[i] is negative, >= nrows, or not strictly ascending relative to
   * idx[i+1] (the i < end guard keeps the i+1 access in bounds; the caller
   * launches with end == idx_size - 1).
   */
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const nnvm::dim_t end, const nnvm::dim_t nrows) {
    if ((i < end && idx[i+1] <= idx[i])
        || idx[i] < 0 || idx[i] >= nrows)
      *out = kRSPIdxErr;
  }
};
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check);
/*!
* \brief Check the validity of CSRNDArray.
* \param rctx Execution context.
* \param input Input NDArray of CSRStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage)
    << "CheckFormatCSRImpl is for CSRNDArray";
  const mxnet::TShape shape = input.shape();
  const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
  const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr);
  const mxnet::TShape storage_shape = input.storage_shape();
  // O(1) structural check: a CSR array must be 2-D with 1-D aux arrays,
  // indptr of length nrows+1, and as many indices as stored values.
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) ||
      (idx_shape[0] != storage_shape[0])) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kCSRShapeErr;
    });
    return;
  }
  if (full_check) {
    // O(N) content check: run the indptr/idx kernels on the array's own
    // device, then copy the single-element result back to err_cpu.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
          // single-element scratch flag on the target device, preset to "ok"
          NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                    rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu = ret_xpu.data();
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
            input.aux_data(csr::kIndPtr).dptr<RType>(),
            indptr_shape[0] - 1, idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
              input.aux_data(csr::kIdx).dptr<IType>(),
              input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
          }
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                        val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}
/*!
* \brief Check the validity of RowSparseNDArray.
* \param rctx Execution context.
* \param input Input NDArray of RowSparseStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kRowSparseStorage)
    << "CheckFormatRSPImpl is for RSPNDArray";
  const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
  // O(1) check: one row index per stored row.
  if (idx_shape[0] != input.storage_shape()[0]) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kRSPShapeErr;
    });
    return;
  }
  // nothing further to validate for an empty index array
  if (idx_shape[0] == 0) {
    return;
  }
  if (full_check) {
    // O(N) check of the row indices on the array's own device; the
    // single-element result is copied back into err_cpu.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
        mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
        NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                  rctx.get_ctx(), false, err_cpu.type_flag_);
        TBlob val_xpu = ret_xpu.data();
        Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
        // end = idx_size - 1 so the kernel's i+1 accesses stay in bounds
        Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
          val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
          idx_shape[0] - 1, input.shape()[0]);
        mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                      val_xpu.get<xpu, 1, DType>(s), s);
      });
    });
  }
}
/*!
 * \brief Dispatch a format check to the storage-type specific checker;
 * default storage needs no validation; unknown types are fatal.
 */
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
                     const TBlob &err_cpu, const bool full_check) {
  const int stype = input.storage_type();
  switch (stype) {
    case kCSRStorage:
      CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kRowSparseStorage:
      CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kDefaultStorage:
      // no-op for default storage
      break;
    default:
      LOG(FATAL) << "Unknown storage type " << stype;
      break;
  }
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/* \brief Casts tensor storage type to the new type.
*/
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype) {
if (!vstorage.empty()) {
for (const auto& i : vstorage) {
if (i != stype) return false;
}
return true;
}
return false;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
* or `stype2'. Sets boolean if both found.
* false is returned for empty inputs.
*/
/*! \brief returns true if every storage type in `vstorage` is either `stype1`
 *         or `stype2`; sets *has_both when both occur. false for empty input.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool *has_both) {
  if (has_both) *has_both = false;
  if (vstorage.empty()) return false;
  uint8_t seen = 0;  // bit 0: stype1 seen, bit 1: stype2 seen
  for (const auto s : vstorage) {
    if (s == stype1) {
      seen |= 1;
    } else if (s == stype2) {
      seen |= 2;
    } else {
      return false;  // something other than the two allowed types
    }
  }
  if (has_both) *has_both = (seen == 3);
  return true;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() != stype) {
return false;
}
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
/*! \brief returns true if every array in `ndarrays` has storage type `stype1`
 *         or `stype2`; sets *has_both when both occur. false for empty input.
 */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool *has_both) {
  if (has_both) *has_both = false;
  if (ndarrays.empty()) return false;
  uint8_t seen = 0;  // bit 0: stype1 seen, bit 1: stype2 seen
  for (const auto& nd : ndarrays) {
    const NDArrayStorageType stype = nd.storage_type();
    if (stype == stype1) {
      seen |= 1;
    } else if (stype == stype2) {
      seen |= 2;
    } else {
      return false;
    }
  }
  if (has_both) *has_both = (seen == 3);
  return true;
}
/*! \brief returns true if storage type of any array in `ndarrays`
* is the same as the target `stype`. false is returned for empty inputs.
*/
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() == stype) {
return true;
}
}
}
return false;
}
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
* is the same as the target `stype`. false is returned for empty inputs.
*/
/*! \brief returns true if any storage type in `ndstypes` equals `stype`.
 *         false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<int>& ndstypes,
                                const NDArrayStorageType stype) {
  return std::any_of(ndstypes.begin(), ndstypes.end(),
                     [stype](const int ndstype) { return ndstype == stype; });
}
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
switch (x) {
case DispatchMode::kFCompute:
return "fcompute";
case DispatchMode::kFComputeEx:
return "fcompute_ex";
case DispatchMode::kFComputeFallback:
return "fcompute_fallback";
case DispatchMode::kVariable:
return "variable";
case DispatchMode::kUndefined:
return "undefined";
}
return "unknown";
}
/*! \brief get string representation of storage_type */
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
  switch (x) {
    case kDefaultStorage:   return "default";
    case kCSRStorage:       return "csr";
    case kRowSparseStorage: return "row_sparse";
  }
  return "unknown";
}
/*! \brief get string representation of device type */
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
  switch (dev_type) {
    case Context::kCPU:       return "cpu";
    case Context::kGPU:       return "gpu";
    case Context::kCPUPinned: return "cpu_pinned";
    case Context::kCPUShared: return "cpu_shared";
  }
  return "unknown";
}
/*!
 * \brief Look up `attr_name` in the node's attribute dictionary.
 * \param attrs       node attributes to query
 * \param attr_name   key to look up
 * \param default_val value returned when the key is absent
 * \return the stored value, or `default_val` if not present
 */
inline std::string attr_value_string(const nnvm::NodeAttrs& attrs,
                                     const std::string& attr_name,
                                     std::string default_val = "") {
  // single lookup instead of the previous find() + at() double lookup
  const auto it = attrs.dict.find(attr_name);
  if (it == attrs.dict.end()) {
    return default_val;
  }
  return it->second;
}
/*! \brief get string representation of the operator stypes */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         const std::vector<int>& in_attrs,
                                         const std::vector<int>& out_attrs) {
  // Build a human-readable multi-line summary: operator name, input/output
  // storage types, parameter dictionary, and device. Used in fallback
  // warnings and error messages.
  std::ostringstream os;
  os << "operator = " << attrs.op->name
     << "\ninput storage types = [";
  for (const int attr : in_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "output storage types = [";
  for (const int attr : out_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "params = {";
  for (auto kv : attrs.dict) {
    os << "\"" << kv.first << "\" : " << kv.second << ", ";
  }
  os << "}\n"
     << "context.dev_mask = " << dev_type_string(dev_mask);
  return os.str();
}
/*! \brief get string representation of the operator */
/*! \brief get string representation of the operator: collects the storage
 *  types of its inputs and outputs and delegates to operator_stype_string. */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  for (const NDArray& arr : inputs) {
    in_stypes.push_back(arr.storage_type());
  }
  for (const NDArray& arr : outputs) {
    out_stypes.push_back(arr.storage_type());
  }
  return operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(),
                               in_stypes, out_stypes);
}
/*! \brief log message once. Intended for storage fallback warning messages. */
/*! \brief Log a message at most once per thread (deduplicated through a
 *  thread-local set of already-seen messages). */
inline void LogOnce(const std::string& message) {
  typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
  auto seen = LogStore::Get();
  if (seen->count(message) == 0) {
    LOG(INFO) << message;
    seen->insert(message);
  }
}
/*! \brief log storage fallback event
*/
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
                               const int dev_mask,
                               const std::vector<int>* in_attrs,
                               const std::vector<int>* out_attrs) {
  // Warn (once per message) that an operator fell back to dense execution.
  // Suppressed entirely when MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0.
  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
  if (!log) return;
  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
  std::ostringstream os;
  const char* warning = "\nThe operator with default storage type will be dispatched "
    "for execution. You're seeing this warning message because the operator above is unable "
    "to process the given ndarrays with specified storage types, context and parameter. "
    "Temporary dense ndarrays are generated in order to execute the operator. "
    "This does not affect the correctness of the programme. "
    "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
    "0 to suppress this warning.";
  os << "\nStorage type fallback detected:\n" << op_str << warning;
  LogOnce(os.str());
#if MXNET_USE_MKLDNN == 1
  // additional hints when MKLDNN is compiled in but disabled or tuned oddly
  if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
                                       "You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
  if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set."
                                                  "Should only be set if "
                                                  "your model has variable input shapes, "
                                                  "as cache size may grow unbounded");
#endif
}
// Heuristic to determine the number of CPU worker threads per GPU.
inline int GetNumThreadsPerGPU() {
  // This is a resource-efficient option (overridable via env var).
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// Heuristic to get the number of matching colors;
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // Resource-efficient option, capped by the per-GPU worker thread count.
  int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  return std::min(num_match_color, GetNumThreadsPerGPU());
}
/*!
 * \brief Sum the n elements of a into start, parallelized with an OpenMP
 * reduction when OpenMP is enabled (serial loop otherwise).
 */
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V total = start;
#pragma omp parallel for reduction(+:total)
  for (int i = 0; i < n; ++i) {
    total += a[i];
  }
  return total;
}
/*!
* \brief
* Helper function for ParallelSort.
* DO NOT call this function directly.
* Use the interface ParallelSort instead.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  // Below the grain size, sort serially; otherwise sort the two halves
  // concurrently (one on a spawned thread, one on this thread) and merge.
  // Recursion depth, and thus thread count, is bounded by len/grainsize.
  if (len < grainsize) {
    std::sort(first, first+len, comp);
  } else {
    std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
    ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
    thr.join();
    std::inplace_merge(first, first+len/2, first+len, comp);
  }
}
/*!
* \brief
* Sort the elements in the range [first, last) into the ascending order defined by
* the comparator comp.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
/*!
 * \brief Sort [first, last) ascending per `comp`, splitting the range across
 * up to `num_threads` threads when it exceeds the grain size.
 * \param first, last  random-access range to sort
 * \param num_threads  parallelism hint; 0 is treated as 1 (previously a
 *                     division by zero)
 * \param comp         strict-weak-ordering comparator
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const size_t num = static_cast<size_t>(std::distance(first, last));
  // guard: num_threads == 0 would otherwise divide by zero
  const size_t threads = std::max(num_threads, static_cast<size_t>(1));
  size_t grainsize = std::max(num / threads + 5, static_cast<size_t>(1024*16));
  ParallelSortHelper(first, num, grainsize, comp);
}
/*!
* \brief
* Sort the elements in the range [first, last) into ascending order.
* The elements are compared using the default < operator.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  // Delegate to the comparator overload with the default less-than ordering.
  ParallelSort(first, last, num_threads,
               std::less<typename std::iterator_traits<RandomIt>::value_type>());
}
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
* \brief Helper functions.
*/
namespace helper {
/*!
 * \brief Primary template: maps a non-array type `T` to its unique_ptr form.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief unique_ptr type for a single object of type `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};
/*!
 * \brief Specialization for an array of unknown bound `T[]`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief unique_ptr type for an array of unknown bound.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};
/*!
 * \brief Specialization for an array of known bound `T[kSize]`;
 * used only to delete the corresponding MakeUnique overload.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief void: MakeUnique for known-bound arrays is disallowed.
   */
  using KnownBound = void;
};
}  // namespace helper
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs a non-array type `T`. The arguments `args` are passed to the
* constructor of `T`. The function does not participate in the overload
* resolution if `T` is an array type.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  // pre-C++14 equivalent of std::make_unique for single objects
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/*!
 * \brief Constructs an array of unknown bound `T[]` of size `n`, value-
 * initialized, and wraps it in a `std::unique_ptr`. Participates in overload
 * resolution only when `T` is an array of unknown bound.
 * \param n number of elements to allocate
 * \return `std::unique_ptr<T[]>` owning the new array
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  return std::unique_ptr<T>(new U[n]{});  // {} value-initializes all elements
}
/*!
 * \brief Deleted overload: constructing an array of known bound (e.g.
 * `MakeUnique<int[5]>()`) is disallowed, matching std::make_unique.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
/*!
 * \brief Fetch the registered compute function of kind `name` for `op`,
 * selected by the device of `ctx` ("<cpu>" or "<gpu>" registry suffix).
 * \return the registered function, or nullptr if the op has none registered.
 */
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  // static: the attribute registries are looked up once per FCompType/name
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
    return nullptr;
  }
}
/*!
* \brief Return the max integer value representable in the type `T` without loss of precision.
*/
template <typename T>
constexpr size_t MaxIntegerValue() {
  // integral types: their own max; floating types: 2^digits, the largest
  // integer magnitude the significand can represent exactly
  return std::is_integral<T>::value ?
    std::numeric_limits<T>::max():
    size_t(2) << (std::numeric_limits<T>::digits - 1);
}
// fp16: 11 significand bits, so integers up to 2^11 are exact
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;
}
// NOTE(review): 2^15 looks large for bf16's 8 significand bits (exact
// integers only up to 2^8) — confirm this constant is intentional.
template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
  return size_t(2) << 14;
}
// Returns the number of bits needed to represent a, i.e. 1 + floor(log2(a))
// for a > 0, and 1 for a == 0. Despite the name this is NOT floor(log2):
// ilog2ul(1) == 1, ilog2ul(2) == 2. Callers rely on this bit-width meaning.
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}
// Same bit-width semantics as ilog2ul, for unsigned int arguments.
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}
/*!
* \brief Return an NDArray of all zeros.
*/
/*!
 * \brief Return an NDArray of all zeros. For non-default storage the
 * zero array is represented implicitly and allocation stays delayed.
 */
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                         const Context &ctx, const int dtype) {
  if (stype != kDefaultStorage) {
    // non-default storage: an empty sparse array already represents zeros
    return NDArray(stype, shape, ctx, true, dtype);
  }
  NDArray zeros(shape, ctx, false, dtype);
  zeros = 0;  // dense storage must be filled explicitly
  return zeros;
}
/*!
* \brief Helper to add a NDArray of zeros to a std::vector.
*/
/*!
 * \brief Append an all-zero NDArray of the given storage/shape/dtype to *vec.
 */
inline void EmplaceBackZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                             const Context &ctx, const int dtype,
                             std::vector<NDArray> *vec) {
  if (stype != kDefaultStorage) {
    // non-default storage: allocation stays delayed; empty means zero
    vec->emplace_back(stype, shape, ctx, true, dtype);
    return;
  }
  vec->emplace_back(shape, ctx, false, dtype);
  vec->back() = 0;  // dense storage must be filled explicitly
}
/*!
* \brief parallelize copy by OpenMP.
*/
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  // Large copies are split across OpenMP threads; small ones use memcpy.
  // Threshold is tunable via MXNET_CPU_PARALLEL_SIZE (default 200000).
  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] = src[i];
    }
  } else {
#pragma GCC diagnostic push
#if __GNUC__ >= 8
    // GCC 8+ warns on memcpy of non-trivial class types; DType here is
    // expected to be trivially copyable, so suppress the warning.
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
    std::memcpy(dst, src, sizeof(DType) * size);
#pragma GCC diagnostic pop
  }
}
/*!
 * \brief parallelize element-wise add (dst[i] += src[i]) by OpenMP;
 * falls back to a serial loop below the MXNET_CPU_PARALLEL_SIZE threshold.
 */
template<typename DType>
inline void ParallelAdd(DType* dst, const DType* src, index_t size) {
  static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= add_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  } else {
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  }
}
/*!
* \brief If numpy compatibility is turned off (default), the shapes passed in
* by users follow the legacy shape definition:
* 1. 0 ndim means the shape is completely unknown.
* 2. 0 dim size means the dim size is unknown.
* We need to convert those shapes to use the numpy shape definition:
* 1. 0 ndim means it's a scalar tensor.
* 2. -1 ndim means the shape is unknown.
* 3. 0 dim size means no elements in that dimension.
* 4. -1 dim size means the dimension's size is unknown.
* so that operator's infer shape function can work in backend.
* \param shape to be converted.
* Note: It is possible that the shape to be converted is already
* numpy compatible. For example, when a subgraph operator's infer
* shape function is called from the infer shape pass of the whole
* graph, its input/output shapes have been converted to numpy
* compatible shapes.
*/
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
  // Legacy ndim == 0 encodes "shape completely unknown" -> numpy unknown shape (ndim == -1).
  if (shape->ndim() == 0) {
    *shape = mxnet::TShape();
    return;
  }
  // Legacy dim size 0 encodes "dim size unknown" -> numpy unknown dim size (-1).
  for (int axis = 0; axis < shape->ndim(); ++axis) {
    if ((*shape)[axis] == 0) {
      (*shape)[axis] = -1;
    }
  }
}
// Vectorized overload: convert every shape in the vector in place.
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  for (mxnet::TShape& shape : *shapes) {
    ConvertToNumpyShape(&shape);
  }
}
/*!
 * \brief Convert a shape produced by the infer-shape functions/pass back to
 *        the legacy shape definition (0 ndim / 0 dim size mean "unknown").
 */
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
  if (!mxnet::ndim_is_known(*shape)) {
    // Unknown shape -> legacy encoding: ndim 0.
    *shape = mxnet::TShape(0, -1);
    return;
  }
  for (int axis = 0; axis < shape->ndim(); ++axis) {
    if (!mxnet::dim_size_is_known(*shape, axis)) {
      (*shape)[axis] = 0;  // unknown dim size -> legacy encoding: 0
    }
  }
}
// Vectorized overload: convert every shape in the vector in place.
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
  for (mxnet::TShape& shape : *shapes) {
    ConvertToLegacyShape(&shape);
  }
}
void ExecuteMonInputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
void ExecuteMonOutputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
/*!
 * \brief Return the output name of a NodeEntry.
 */
static inline std::string GetOutputName(const nnvm::NodeEntry& e) {
  // Wrap the entry in a temporary symbol so nnvm can render its name.
  nnvm::Symbol wrapper;
  wrapper.outputs.push_back(e);
  return wrapper.ListOutputNames().front();
}
// Map negative axis values to their non-negative equivalents (axis += ndim)
// and verify every axis lies in [0, ndim).
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
  const int ndim = src.ndim();
  mxnet::TShape canonical = src;
  for (int i = 0; i < ndim; ++i) {
    if (canonical[i] < 0) {
      canonical[i] += ndim;
    }
    CHECK(canonical[i] >= 0 && canonical[i] < ndim)
        << "axes[" << i << "]=" << canonical[i]
        << " exceeds the range [" << 0 << ", " << ndim << ")";
  }
  return canonical;
}
// True for the floating-point mshadow dtypes (fp16/fp32/fp64).
inline bool is_float(const int dtype) {
  switch (dtype) {
    case mshadow::kFloat32:
    case mshadow::kFloat64:
    case mshadow::kFloat16:
      return true;
    default:
      return false;
  }
}
// Return the "wider" of two mshadow dtypes under the promotion order
// float64 > float32 > float16 > int64 > int32 > uint8/int8; any float beats
// any integer. A mixed uint8/int8 pair is a checked error (callers must
// route that case through np_binary_out_infer_type).
inline int get_more_precise_type(const int type1, const int type2) {
  if (type1 == type2) return type1;
  const bool float1 = is_float(type1);
  const bool float2 = is_float(type2);
  if (float1 && float2) {
    if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) {
      return mshadow::kFloat64;
    }
    if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) {
      return mshadow::kFloat32;
    }
    return mshadow::kFloat16;
  }
  if (float1 || float2) {
    // Floating point always wins over an integral type.
    return float1 ? type1 : type2;
  }
  if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) {
    return mshadow::kInt64;
  }
  if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) {
    return mshadow::kInt32;
  }
  CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
          (type1 == mshadow::kInt8 && type2 == mshadow::kUint8)))
      << "1 is UInt8 and 1 is Int8 should not get here";
  if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) {
    return mshadow::kUint8;
  }
  return mshadow::kInt8;
}
// numpy-style binary output dtype: a mixed int8/uint8 pair promotes to
// int32; every other pair follows get_more_precise_type.
inline int np_binary_out_infer_type(const int type1, const int type2) {
  const bool mixed_i8_u8 =
      (type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
      (type1 == mshadow::kInt8 && type2 == mshadow::kUint8);
  return mixed_i8_u8 ? mshadow::kInt32 : get_more_precise_type(type1, type2);
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
GB_unaryop__abs_uint8_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint8_int64
// op(A') function: GB_tran__abs_uint8_int64
// C type: uint8_t
// A type: int64_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated kernel: Cx [p] = (uint8_t) Ax [p] for all anz entries,
// via the GB_CAST_OP macro defined above. Returns GrB_NO_VALUE when this
// operator/type combination is compiled out (GB_DISABLE).
GrB_Info GB_unop__abs_uint8_int64
(
    uint8_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,      // number of entries to process
    int nthreads      // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Entrywise apply; static schedule since every iteration costs the same.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The actual loop lives in GB_unaryop_transpose.c, which is textually
// included below and expands the GB_* macros defined at the top of this file.
GrB_Info GB_tran__abs_uint8_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
dataset.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <string>
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <utility>
#include <vector>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
 * \brief This class is used to store some meta (non-feature) data for training data,
 *        e.g. labels, weights, initial scores, query level information.
 *
 * Some details:
 * 1. Label, used for training.
 * 2. Weights, weights of records, optional.
 * 3. Query Boundaries, necessary for lambdarank.
 *    The documents of the i-th query are in [ query_boundaries[i], query_boundaries[i+1] ).
 * 4. Query Weights, auto calculated from weights and query_boundaries (if both exist):
 *    the weight of the i-th query is the sum of the record weights in
 *    [ query_boundaries[i], query_boundaries[i+1] ) divided by
 *    (query_boundaries[i+1] - query_boundaries[i]).
 * 5. Initial score, optional. If it exists, the model will boost from this score,
 *    otherwise it will start from 0.
 */
class Metadata {
 public:
  /*!
   * \brief Null constructor
   */
  Metadata();
  /*!
   * \brief Initialization will load query level information, since it is needed for sampling data
   * \param data_filename Filename of data
   * \param initscore_file Filename of initial score
   */
  void Init(const char* data_filename, const char* initscore_file);
  /*!
   * \brief Init as a subset of another Metadata object
   * \param metadata Source metadata to take the subset from
   * \param used_indices Indices (into the source) of the records to keep
   * \param num_used_indices Number of entries in used_indices
   */
  void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
  /*!
   * \brief Initialize from binary memory
   * \param memory Pointer to memory
   */
  void LoadFromMemory(const void* memory);
  /*! \brief Destructor */
  ~Metadata();
  /*!
   * \brief Initial work, will allocate space for label, weight (if exists) and query (if exists)
   * \param num_data Number of training data
   * \param weight_idx Index of weight column, < 0 means it doesn't exist
   * \param query_idx Index of query id column, < 0 means it doesn't exist
   */
  void Init(data_size_t num_data, int weight_idx, int query_idx);
  /*!
   * \brief Partition label by used indices
   * \param used_indices Indices of locally used records
   */
  void PartitionLabel(const std::vector<data_size_t>& used_indices);
  /*!
   * \brief Partition meta data according to local used indices if needed
   * \param num_all_data Number of total training data, including other machines' data on parallel learning
   * \param used_data_indices Indices of local used training data
   */
  void CheckOrPartition(data_size_t num_all_data,
                        const std::vector<data_size_t>& used_data_indices);
  /*! \brief Set all labels */
  void SetLabel(const label_t* label, data_size_t len);
  /*! \brief Set all record weights */
  void SetWeights(const label_t* weights, data_size_t len);
  /*! \brief Set all query ids */
  void SetQuery(const data_size_t* query, data_size_t len);
  /*!
   * \brief Set initial scores
   * \param init_score Initial scores; this class will manage memory for init_score.
   * \param len Number of initial scores
   */
  void SetInitScore(const double* init_score, data_size_t len);
  /*!
   * \brief Save binary data to file
   * \param writer Writer for the target file
   */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const;
  /*!
   * \brief Get size in bytes of this object
   */
  size_t SizesInByte() const;
  /*!
   * \brief Get pointer of label
   * \return Pointer of label
   */
  inline const label_t* label() const { return label_.data(); }
  /*!
   * \brief Set label for one record
   * \param idx Index of this record
   * \param value Label value of this record
   */
  inline void SetLabelAt(data_size_t idx, label_t value) {
    label_[idx] = value;
  }
  /*!
   * \brief Set weight for one record
   * \param idx Index of this record
   * \param value Weight value of this record
   */
  inline void SetWeightAt(data_size_t idx, label_t value) {
    weights_[idx] = value;
  }
  /*!
   * \brief Set query id for one record
   * \param idx Index of this record
   * \param value Query id value of this record
   */
  inline void SetQueryAt(data_size_t idx, data_size_t value) {
    queries_[idx] = static_cast<data_size_t>(value);
  }
  /*!
   * \brief Get weights; if they don't exist, will return nullptr
   * \return Pointer of weights
   */
  inline const label_t* weights() const {
    if (!weights_.empty()) {
      return weights_.data();
    } else {
      return nullptr;
    }
  }
  /*!
   * \brief Get data boundaries on queries; if they don't exist, will return nullptr.
   *        We assume data is ordered by query: the interval
   *        [query_boundaries[i], query_boundaries[i+1]) holds the data indices for query i.
   * \return Pointer of data boundaries on queries
   */
  inline const data_size_t* query_boundaries() const {
    if (!query_boundaries_.empty()) {
      return query_boundaries_.data();
    } else {
      return nullptr;
    }
  }
  /*!
   * \brief Get number of queries
   * \return Number of queries
   */
  inline data_size_t num_queries() const { return num_queries_; }
  /*!
   * \brief Get weights for queries; if they don't exist, will return nullptr
   * \return Pointer of weights for queries
   */
  inline const label_t* query_weights() const {
    if (!query_weights_.empty()) {
      return query_weights_.data();
    } else {
      return nullptr;
    }
  }
  /*!
   * \brief Get initial scores; if they don't exist, will return nullptr
   * \return Pointer of initial scores
   */
  inline const double* init_score() const {
    if (!init_score_.empty()) {
      return init_score_.data();
    } else {
      return nullptr;
    }
  }
  /*!
   * \brief Get size of initial scores
   */
  inline int64_t num_init_score() const { return num_init_score_; }
  /*! \brief Disable copy */
  Metadata& operator=(const Metadata&) = delete;
  /*! \brief Disable copy */
  Metadata(const Metadata&) = delete;

 private:
  /*! \brief Load initial scores from file */
  void LoadInitialScore(const char* initscore_file);
  /*! \brief Load weights from file */
  void LoadWeights();
  /*! \brief Load query boundaries from file */
  void LoadQueryBoundaries();
  /*! \brief Load query weights */
  void LoadQueryWeights();
  /*! \brief Filename of current data */
  std::string data_filename_;
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Number of weights, used to check correct weight file */
  data_size_t num_weights_;
  /*! \brief Label data */
  std::vector<label_t> label_;
  /*! \brief Weights data */
  std::vector<label_t> weights_;
  /*! \brief Query boundaries */
  std::vector<data_size_t> query_boundaries_;
  /*! \brief Query weights */
  std::vector<label_t> query_weights_;
  /*! \brief Number of queries */
  data_size_t num_queries_;
  /*! \brief Number of initial scores, used to check correct initial score file */
  int64_t num_init_score_;
  /*! \brief Initial score */
  std::vector<double> init_score_;
  /*! \brief Queries data */
  std::vector<data_size_t> queries_;
  /*! \brief Mutex for thread-safe calls */
  std::mutex mutex_;
  /*! \brief True if weights were loaded from a side file */
  bool weight_load_from_file_;
  /*! \brief True if queries were loaded from a side file */
  bool query_load_from_file_;
  /*! \brief True if initial scores were loaded from a side file */
  bool init_score_load_from_file_;
};
/*! \brief Interface for Parser (reads one text record into features + label) */
class Parser {
 public:
  /*! \brief virtual destructor */
  virtual ~Parser() {}
  /*!
   * \brief Parse one line with label
   * \param str One line record, string format, should end with '\0'
   * \param out_features Output columns, stored as (column_idx, value) pairs
   * \param out_label Label will be stored here if it exists
   */
  virtual void ParseOneLine(const char* str,
                            std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
  /*! \brief Number of feature columns in the parsed data */
  virtual int NumFeatures() const = 0;
  /*!
   * \brief Create an object of parser, will auto choose the format depending on the file
   * \param filename One Filename of data
   * \param header Whether the file contains a header line
   * \param num_features Pass num_features of this data file if you know, <=0 means don't know
   * \param label_idx index of label column
   * \return Object of parser (raw pointer; caller presumably owns it — confirm against call sites)
   */
  static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};
/*! \brief The main class of data set,
 *         which is used for training or validation.
 */
class Dataset {
 public:
  friend DatasetLoader;
  LIGHTGBM_EXPORT Dataset();
  LIGHTGBM_EXPORT Dataset(data_size_t num_data);
  /*!
   * \brief Build the feature groups / bin mappers of this dataset.
   * \param bin_mappers One bin mapper per total feature (ownership is taken)
   * \param num_total_features Number of raw (pre-filtering) features
   * \param forced_bins User-forced bin upper bounds per feature
   * \param sample_non_zero_indices Per-column indices of sampled non-zero values
   * \param num_per_col Number of sampled values per column
   * \param total_sample_cnt Total number of sampled rows
   * \param io_config Configuration parameters
   */
  void Construct(
    std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
    int num_total_features,
    const std::vector<std::vector<double>>& forced_bins,
    int** sample_non_zero_indices,
    const int* num_per_col,
    size_t total_sample_cnt,
    const Config& io_config);
  /*! \brief Destructor */
  LIGHTGBM_EXPORT ~Dataset();
  /*! \brief True if `other` has the same feature layout (counts, label index
   *         and per-feature bin mappers), so the two datasets are compatible */
  LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
    if (num_features_ != other.num_features_) {
      return false;
    }
    if (num_total_features_ != other.num_total_features_) {
      return false;
    }
    if (label_idx_ != other.label_idx_) {
      return false;
    }
    for (int i = 0; i < num_features_; ++i) {
      if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
        return false;
      }
    }
    return true;
  }
  /*! \brief Push one dense row; values of unused features are skipped.
   *         No-op once loading has finished. */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
    if (is_finish_load_) { return; }
    for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
      int feature_idx = used_feature_map_[i];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
      }
    }
  }
  /*! \brief Push one sparse row given as (column_idx, value) pairs.
   *         No-op once loading has finished. */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
    if (is_finish_load_) { return; }
    for (auto& inner_data : feature_values) {
      if (inner_data.first >= num_total_features_) { continue; }
      int feature_idx = used_feature_map_[inner_data.first];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
      }
    }
  }
  /*! \brief Push a single value directly by (group, sub_feature) address */
  inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
    feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
  }
  /*! \brief Map an inner (used) feature index back to the raw column index */
  inline int RealFeatureIndex(int fidx) const {
    return real_feature_idx_[fidx];
  }
  /*! \brief Map a raw column index to the inner feature index (-1 if unused) */
  inline int InnerFeatureIndex(int col_idx) const {
    return used_feature_map_[col_idx];
  }
  /*! \brief Feature group that contains the given inner feature */
  inline int Feature2Group(int feature_idx) const {
    return feature2group_[feature_idx];
  }
  /*! \brief Sub-feature index within its group.
   *         NOTE(review): name is misspelled ("Feture") but is public API — keep as-is. */
  inline int Feture2SubFeature(int feature_idx) const {
    return feature2subfeature_[feature_idx];
  }
  /*! \brief First global bin index of the given feature group */
  inline uint64_t GroupBinBoundary(int group_idx) const {
    return group_bin_boundaries_[group_idx];
  }
  /*! \brief Total number of bins over all groups */
  inline uint64_t NumTotalBin() const {
    return group_bin_boundaries_.back();
  }
  /*! \brief Raw column indices of all features that are actually used */
  inline std::vector<int> ValidFeatureIndices() const {
    std::vector<int> ret;
    for (int i = 0; i < num_total_features_; ++i) {
      if (used_feature_map_[i] >= 0) {
        ret.push_back(i);
      }
    }
    return ret;
  }
  void ReSize(data_size_t num_data);
  void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);
  LIGHTGBM_EXPORT void FinishLoad();
  LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);
  LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);
  LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);
  LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);
  LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);
  LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);
  LIGHTGBM_EXPORT bool GetInt8Field(const char* field_name, data_size_t* out_len, const int8_t** out_ptr);
  /*!
   * \brief Save current dataset into binary file, will save to "filename.bin"
   */
  LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);
  LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename);
  LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);
  LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);
  void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
                           const data_size_t* data_indices, data_size_t num_data,
                           int leaf_idx,
                           std::vector<std::unique_ptr<OrderedBin>>* ordered_bins,
                           const score_t* gradients, const score_t* hessians,
                           score_t* ordered_gradients, score_t* ordered_hessians,
                           bool is_constant_hessian,
                           HistogramBinEntry* histogram_data) const;
  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
                    HistogramBinEntry* data) const;
  /*! \brief Split data_indices into <=threshold / >threshold partitions for one feature */
  inline data_size_t Split(int feature,
                           const uint32_t* threshold, int num_threshold, bool default_left,
                           data_size_t* data_indices, data_size_t num_data,
                           data_size_t* lte_indices, data_size_t* gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
  }
  /*! \brief Bin offset of the feature within its group (1 for the first
   *         sub-feature, 0 otherwise) */
  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }
  /*! \brief Number of bins of the given inner feature */
  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }
  /*! \brief Monotone constraint of feature i (0 when no constraints are set) */
  inline int8_t FeatureMonotone(int i) const {
    if (monotone_types_.empty()) {
      return 0;
    } else {
      return monotone_types_[i];
    }
  }
  /*! \brief Penalty of feature i (1 when no penalties are set).
   *         NOTE(review): name is misspelled ("Penalte") but is public API — keep as-is. */
  inline double FeaturePenalte(int i) const {
    if (feature_penalty_.empty()) {
      return 1;
    } else {
      return feature_penalty_[i];
    }
  }
  /*! \brief True if any feature has a non-zero monotone constraint */
  bool HasMonotone() const {
    if (monotone_types_.empty()) {
      return false;
    } else {
      for (size_t i = 0; i < monotone_types_.size(); ++i) {
        if (monotone_types_[i] != 0) {
          return true;
        }
      }
      return false;
    }
  }
  /*! \brief Total number of bins in the given feature group */
  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }
  inline const BinMapper* FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }
  inline const Bin* FeatureBin(int i) const {
    const int group = feature2group_[i];
    return feature_groups_[group]->bin_data_.get();
  }
  inline const Bin* FeatureGroupBin(int group) const {
    return feature_groups_[group]->bin_data_.get();
  }
  inline bool FeatureGroupIsSparse(int group) const {
    return feature_groups_[group]->is_sparse_;
  }
  inline BinIterator* FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }
  inline BinIterator* FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }
  /*! \brief Convert a bin threshold back to a raw feature value */
  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }
  // given a real threshold, find the closest threshold bin
  inline uint32_t BinThreshold(int i, double threshold_double) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
  }
  /*! \brief Create one OrderedBin per feature group (in parallel) */
  inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
    ordered_bins->resize(num_groups_);
    OMP_INIT_EX();
    #pragma omp parallel for schedule(guided)
    for (int i = 0; i < num_groups_; ++i) {
      OMP_LOOP_EX_BEGIN();
      ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
  }
  /*!
   * \brief Get meta data pointer
   * \return Pointer of meta data
   */
  inline const Metadata& metadata() const { return metadata_; }
  /*! \brief Get Number of used features */
  inline int num_features() const { return num_features_; }
  /*! \brief Get Number of feature groups */
  inline int num_feature_groups() const { return num_groups_;}
  /*! \brief Get Number of total features */
  inline int num_total_features() const { return num_total_features_; }
  /*! \brief Get the index of label column */
  inline int label_idx() const { return label_idx_; }
  /*! \brief Get names of current data set */
  inline const std::vector<std::string>& feature_names() const { return feature_names_; }
  /*! \brief Set feature names; rejects non-ASCII names and replaces spaces
   *         with underscores */
  inline void set_feature_names(const std::vector<std::string>& feature_names) {
    if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
      Log::Fatal("Size of feature_names error, should equal with total number of features");
    }
    feature_names_ = std::vector<std::string>(feature_names);
    // replace ' ' in feature_names with '_'
    bool spaceInFeatureName = false;
    for (auto& feature_name : feature_names_) {
      // check ascii
      if (!Common::CheckASCII(feature_name)) {
        Log::Fatal("Do not support non-ascii characters in feature name.");
      }
      if (feature_name.find(' ') != std::string::npos) {
        spaceInFeatureName = true;
        std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
      }
    }
    if (spaceInFeatureName) {
      Log::Warning("Find whitespaces in feature_names, replace with underlines");
    }
  }
  /*! \brief Per-column bin info strings ("none" for unused columns) */
  inline std::vector<std::string> feature_infos() const {
    std::vector<std::string> bufs;
    for (int i = 0; i < num_total_features_; i++) {
      int fidx = used_feature_map_[i];
      if (fidx == -1) {
        bufs.push_back("none");
      } else {
        const auto bin_mapper = FeatureBinMapper(fidx);
        bufs.push_back(bin_mapper->bin_info());
      }
    }
    return bufs;
  }
  void ResetConfig(const char* parameters);
  /*! \brief Get Number of data */
  inline data_size_t num_data() const { return num_data_; }
  /*! \brief Disable copy */
  Dataset& operator=(const Dataset&) = delete;
  /*! \brief Disable copy */
  Dataset(const Dataset&) = delete;
  void addFeaturesFrom(Dataset* other);

 private:
  std::string data_filename_;
  /*! \brief Store used features */
  std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
  /*! \brief Mapper from real feature index to used index*/
  std::vector<int> used_feature_map_;
  /*! \brief Number of used features*/
  int num_features_;
  /*! \brief Number of total features*/
  int num_total_features_;
  /*! \brief Number of total data*/
  data_size_t num_data_;
  /*! \brief Store some label level data*/
  Metadata metadata_;
  /*! \brief index of label column */
  int label_idx_ = 0;
  /*! \brief Threshold for treating a feature as a sparse feature */
  double sparse_threshold_;
  /*! \brief store feature names */
  std::vector<std::string> feature_names_;
  /*! \brief Token identifying the binary dataset file format */
  static const char* binary_file_token;
  int num_groups_;
  std::vector<int> real_feature_idx_;
  std::vector<int> feature2group_;
  std::vector<int> feature2subfeature_;
  std::vector<uint64_t> group_bin_boundaries_;
  std::vector<int> group_feature_start_;
  std::vector<int> group_feature_cnt_;
  std::vector<int8_t> monotone_types_;
  std::vector<double> feature_penalty_;
  bool is_finish_load_;
  int max_bin_;
  std::vector<int32_t> max_bin_by_feature_;
  std::vector<std::vector<double>> forced_bin_bounds_;
  int bin_construct_sample_cnt_;
  int min_data_in_bin_;
  bool use_missing_;
  bool zero_as_missing_;
};
} // namespace LightGBM
#endif  // LIGHTGBM_DATASET_H_
|
NAS_FT.c | //---------------------------------------------------------------------
// program FT
//---------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#if !defined(CLASS_W) && !defined(CLASS_S) && !defined(CLASS_A) && !defined(CLASS_B) && !defined(CLASS_C) && !defined(CLASS_D) && !defined(CLASS_E)
# define CLASS_W
#endif
//----------
// Class S:
//----------
#ifdef CLASS_S
# define NX 64
# define NY 64
# define NZ 64
# define MAXDIM 64
# define NITER_DEFAULT 6
# define NXP 65
# define NYP 64
# define NTOTAL 262144
# define NTOTALP 266240
#endif
//----------
// Class W:
//----------
#ifdef CLASS_W
# define NX 128
# define NY 128
# define NZ 32
# define MAXDIM 128
# define NITER_DEFAULT 6
# define NXP 129
# define NYP 128
# define NTOTAL 524288
# define NTOTALP 528384
#endif
//----------
// Class A:
//----------
#ifdef CLASS_A
# define NX 256
# define NY 256
# define NZ 128
# define MAXDIM 256
# define NITER_DEFAULT 6
# define NXP 257
# define NYP 256
# define NTOTAL 8388608
# define NTOTALP 8421376
#endif
//----------
// Class B:
//----------
#ifdef CLASS_B
# define NX 512
# define NY 256
# define NZ 256
# define MAXDIM 512
# define NITER_DEFAULT 20
# define NXP 513
# define NYP 256
# define NTOTAL 33554432
# define NTOTALP 33619968
#endif
//----------
// Class C:
//----------
#ifdef CLASS_C
# define NX 512
# define NY 512
# define NZ 512
# define MAXDIM 512
# define NITER_DEFAULT 20
# define NXP 513
# define NYP 512
# define NTOTAL 134217728
# define NTOTALP 134479872
#endif
//----------
// Class D:
//----------
#ifdef CLASS_D
# define NX 2048
# define NY 1024
# define NZ 1024
# define MAXDIM 2048
# define NITER_DEFAULT 25
# define NXP 2049
# define NYP 1024
# define NTOTAL 2147483648
# define NTOTALP 2148532224
#endif
//----------
// Class E:
//----------
#ifdef CLASS_E
# define NX 4096
# define NY 2048
# define NZ 2048
# define MAXDIM 4096
# define NITER_DEFAULT 25
# define NXP 4097
# define NYP 2048
# define NTOTAL 17179869184
# define NTOTALP 17184063488
#endif
/* Double-precision complex value. */
typedef struct
{
  double real;
  double imag;
} dcomplex;

#define min(x,y) ((x) < (y) ? (x) : (y))
#define max(x,y) ((x) > (y) ? (x) : (y))

#define FFTBLOCK_DEFAULT 16
#define FFTBLOCKPAD_DEFAULT 18
#define CACHESIZE 8192
#define BLOCKMAX 32
#define SEED 314159265.0
#define A 1220703125.0
#define PI 3.141592653589793238
#define ALPHA 1.0e-6

/* common /timerscomm/ */

/* Complex division z1 / z2 via the textbook formula
   (a+bi)/(c+di) = ((ac+bd) + (bc-ad)i) / (c^2 + d^2). */
dcomplex dcmplx_div(dcomplex z1, dcomplex z2)
{
  double denom = z2.real * z2.real + z2.imag * z2.imag;
  dcomplex quotient;
  quotient.real = (z1.real * z2.real + z1.imag * z2.imag) / denom;
  quotient.imag = (z1.imag * z2.real - z1.real * z2.imag) / denom;
  return quotient;
}
#define dcmplx(r,i) (dcomplex){r, i}
#define dcmplx_add(a,b) (dcomplex){(a).real+(b).real, (a).imag+(b).imag}
#define dcmplx_sub(a,b) (dcomplex){(a).real-(b).real, (a).imag-(b).imag}
#define dcmplx_mul(a,b) (dcomplex){((a).real*(b).real)-((a).imag*(b).imag),\
((a).real*(b).imag)+((a).imag*(b).real)}
#define dcmplx_mul2(a,b) (dcomplex){(a).real*(b), (a).imag*(b)}
#define dcmplx_div2(a,b) (dcomplex){(a).real/(b), (a).imag/(b)}
#define dcmplx_abs(x) sqrt(((x).real*(x).real) + ((x).imag*(x).imag))
#define dconjg(x) (dcomplex){(x).real, -1.0*(x).imag}
/* common /blockinfo/ */
int fftblock;
/* common /workarr/ */
dcomplex plane[(BLOCKMAX + 1)*MAXDIM];
dcomplex scr[MAXDIM][BLOCKMAX + 1];
// for checksum data
/* common /sumcomm/ */
dcomplex sums[NITER_DEFAULT + 1];
/* common /mainarrays/ */
double twiddle[NZ][NY][NX + 1];
dcomplex xnt[NZ][NY][NX + 1];
dcomplex y[NZ][NY][NX + 1];
void appft(int niter, double *total_time, int *verified);
void CompExp(int n, dcomplex exponent[n]);
int ilog2(int n);
void CalculateChecksum(dcomplex *csum, int iterN, int d1, int d2, int d3,
dcomplex u[d3][d2][d1 + 1]);
void compute_initial_conditions(int d1, int d2, int d3,
dcomplex u0[d3][d2][d1 + 1]);
void evolve(int nx, int ny, int nz,
dcomplex x[nz][ny][nx + 1], dcomplex y[nz][ny][nx + 1],
double twiddle[nz][ny][nx + 1]);
void fftXYZ(int sign, int n1, int n2, int n3,
dcomplex x[n3][n2][n1 + 1], dcomplex xout[(n1 + 1)*n2 * n3],
dcomplex exp1[n1], dcomplex exp2[n2], dcomplex exp3[n3]);
void verify(int n1, int n2, int n3, int nt, dcomplex cksum[nt + 1],
int *verified);
double randlc( double *x, double a );
void vranlc( int n, double *x, double a, double y[] );
char getclass();
void print_results(char *name, char class, int n1, int n2, int n3, int niter,
double t, double mops, char *optype, int verified);
double start[64], elapsed[64];
double elapsed_time( void );
void timer_clear( int n );
void timer_start( int n );
void timer_stop( int n );
double timer_read( int n );
void wtime(double *t);
/* Driver: run the FT benchmark, compute MFLOPS from the standard NPB
   operation-count formula, print results, and exit 0 iff verified. */
int main(int argc, char *argv[])
{
  char Class;
  int verified;
  double total_time, mflops;
  int niter = NITER_DEFAULT;

  printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - FT Benchmark\n\n");
  printf(" Size : %4dx%4dx%4d\n", NX, NY, NZ);
  printf(" Iterations : %10d\n", niter);
  printf("\n");

  Class = getclass();
  appft(niter, &total_time, &verified);

  /* Guard against a zero elapsed time to avoid division by zero. */
  mflops = 0.0;
  if (total_time != 0.0)
  {
    mflops = 1.0e-6 * (double)NTOTAL *
             (14.8157 + 7.19641 * log((double)NTOTAL)
              + (5.23518 + 7.21113 * log((double)NTOTAL)) * niter)
             / total_time;
  }

  print_results("FT", Class, NX, NY, NZ, niter,
                total_time, mflops, " floating point", verified);
  return verified ? 0 : 1;
}
/* Infer the benchmark class letter from the compile-time problem size.
   Values must stay in sync with the CLASS_* macro blocks above.
   Returns 'U' (unknown) for an unrecognized size. */
char getclass()
{
  if ((NX == 64) && (NY == 64) &&
      (NZ == 64) && (NITER_DEFAULT == 6))
  {
    return 'S';
  }
  else if ((NX == 128) && (NY == 128) &&
           (NZ == 32) && (NITER_DEFAULT == 6))
  {
    return 'W';
  }
  else if ((NX == 256) && (NY == 256) &&
           (NZ == 128) && (NITER_DEFAULT == 6))
  {
    return 'A';
  }
  else if ((NX == 512) && (NY == 256) &&
           (NZ == 256) && (NITER_DEFAULT == 20))
  {
    return 'B';
  }
  else if ((NX == 512) && (NY == 512) &&
           (NZ == 512) && (NITER_DEFAULT == 20))
  {
    return 'C';
  }
  else if ((NX == 2048) && (NY == 1024) &&
           (NZ == 1024) && (NITER_DEFAULT == 25))
  {
    return 'D';
  }
  else if ((NX == 4096) && (NY == 2048) &&
           (NZ == 2048) && (NITER_DEFAULT == 25))
  {
    /* Fix: CLASS_E builds (see the CLASS_E macro block) were previously
       reported as 'U' because this branch was missing. */
    return 'E';
  }
  else
  {
    return 'U';
  }
}
void appft(int niter, double *total_time, int *verified)
{
    /* Benchmark driver:
     *   1. build the random input field and the per-axis FFT exponent tables,
     *   2. precompute the time-evolution ("twiddle") factors,
     *   3. run 'niter' evolve / inverse-FFT / checksum iterations,
     *   4. verify the checksums and return the timed duration.
     * Results come back through *total_time and *verified.
     * NOTE(review): xnt, y, twiddle and sums are file-scope globals
     * declared elsewhere in this file. */
    int i, j, k, kt, n12, n22, n32, ii, jj, kk, ii2, ik2;
    double ap;
    dcomplex exp1[NX], exp2[NY], exp3[NZ];
    for (i = 1; i <= 15; i++)
    {
        timer_clear(i);
    }
    timer_start(2);   /* timer 2: untimed initialization phase */
    compute_initial_conditions(NX, NY, NZ, xnt);
    CompExp(NX, exp1);   /* roots-of-unity tables, one per axis */
    CompExp(NY, exp2);
    CompExp(NZ, exp3);
    /* Warm-up forward transform (results discarded; repeated below timed). */
    fftXYZ(1, NX, NY, NZ, xnt, (dcomplex *)y, exp1, exp2, exp3);
    timer_stop(2);
    timer_start(1);   /* timer 1: the benchmarked section */
    n12 = NX / 2;
    n22 = NY / 2;
    n32 = NZ / 2;
    ap = -4.0 * ALPHA * (PI * PI);
    /* twiddle[i][k][j] = exp(-4*alpha*pi^2 * |freq|^2); the (idx/half)*N
     * term folds indices above N/2 onto negative frequencies. */
    for (i = 0; i < NZ; i++)
    {
        ii = i - (i / n32) * NZ;
        ii2 = ii * ii;
#pragma omp parallel for default(shared) private(k, j, kk, ik2, jj) firstprivate(n22, ii2, n12, ap, i)
        for (k = 0; k < NY; k++)
        {
            kk = k - (k / n22) * NY;
            ik2 = ii2 + kk * kk;
            for (j = 0; j < NX; j++)
            {
                jj = j - (j / n12) * NX;
                twiddle[i][k][j] = exp(ap * (double)(jj * jj + ik2));
            }
        }
    }
    /* Regenerate the input and take its (now timed) forward transform. */
    compute_initial_conditions(NX, NY, NZ, xnt);
    fftXYZ(1, NX, NY, NZ, xnt, (dcomplex *)y, exp1, exp2, exp3);
    for (kt = 1; kt <= niter; kt++)
    {
        evolve(NX, NY, NZ, xnt, y, twiddle);   /* y *= twiddle; xnt = y */
        fftXYZ(-1, NX, NY, NZ, xnt, (dcomplex *)xnt, exp1, exp2, exp3);
        CalculateChecksum(&sums[kt], kt, NX, NY, NZ, xnt);
    }
    // Verification test.
    verify(NX, NY, NZ, niter, sums, verified);
    timer_stop(1);
    *total_time = timer_read(1);
}
//---------------------------------------------------------------------
// compute the roots-of-unity array that will be used for subsequent FFTs.
//---------------------------------------------------------------------
void CompExp(int n, dcomplex exponent[n])
{
    /* Fill 'exponent' with the roots of unity consumed by Swarztrauber():
     * slot 0 stores m = log2(n); for each level j the slots
     * [ku-1 .. ku+ln-2] hold exp(i*pi*k/ln) for k = 0..ln-1.
     * NOTE(review): 'nu' is assigned but never used. */
    int m, nu, ku, i, j, ln;
    double t, ti;
    const double pi = 3.141592653589793238;
    nu = n;
    m = ilog2(n);
    exponent[0] = dcmplx(m, 0.0);   /* stash log2(n) in the first slot */
    ku = 2;
    ln = 1;
    for (j = 1; j <= m; j++)
    {
        t = pi / ln;
#pragma omp parallel for default(shared) private(i, ti) firstprivate(ln, t, ku)
        for (i = 0; i <= ln - 1; i++)
        {
            ti = i * t;
            exponent[i + ku - 1] = dcmplx(cos(ti), sin(ti));
        }
        ku = ku + ln;   /* next level starts right after this one */
        ln = 2 * ln;
    }
}
int ilog2(int n)
{
    /* Ceiling of log2(n) for n >= 1; returns 0 when n == 1. */
    int result = 0;
    int power = 1;
    while (power < n)
    {
        power *= 2;
        result++;
    }
    return result;
}
//---------------------------------------------------------------------
// compute a^exponent mod 2^46
//---------------------------------------------------------------------
double ipow46(double a, int exponent)
{
    /* Compute a^exponent mod 2^46 by binary exponentiation, using
     * randlc() as the mod-2^46 multiply/square primitive:
     *   a^n = (a^(n/2))^2   if n even,
     *   a^n = a * a^(n-1)   if n odd. */
    double base = a;
    double acc = 1.0;
    double junk;
    int e = exponent;
    if (e == 0)
    {
        return 1.0;
    }
    while (e > 1)
    {
        if (e % 2 == 0)
        {
            junk = randlc(&base, base);   /* square the base */
            e /= 2;
        }
        else
        {
            junk = randlc(&acc, base);    /* fold one factor into acc */
            e -= 1;
        }
    }
    junk = randlc(&acc, base);            /* final multiply */
    return acc;
}
void CalculateChecksum(dcomplex *csum, int iterN, int d1, int d2, int d3,
                       dcomplex u[d3][d2][d1 + 1])
{
    /* Compute the standard NPB checksum: sum u at 1024 deterministic,
     * pseudo-scattered grid points, normalize by the total number of grid
     * points, print it, and return it through *csum.
     * Fixed: the divisor d1*d2*d3 was multiplied in 'int', which overflows
     * for class D and up (2048*1024*1024 == 2^31, undefined behavior);
     * the product is now formed in double. */
    int i, i1, ii, ji, ki;
    dcomplex csum_temp = dcmplx(0.0, 0.0);
    for (i = 1; i <= 1024; i++)
    {
        i1 = i;
        ii = i1 % d1;        /* scattered but reproducible sample indices */
        ji = 3 * i1 % d2;
        ki = 5 * i1 % d3;
        csum_temp = dcmplx_add(csum_temp, u[ki][ji][ii]);
    }
    csum_temp = dcmplx_div2(csum_temp, (double)d1 * (double)d2 * (double)d3);
    printf(" T =%5d Checksum =%22.12E%22.12E\n",
           iterN, csum_temp.real, csum_temp.imag);
    *csum = csum_temp;
}
void compute_initial_conditions(int d1, int d2, int d3,
                                dcomplex u0[d3][d2][d1 + 1])
{
    /* Fill u0 with pseudorandom values in (0,1), z-plane by z-plane, using
     * the NPB linear congruential generator.  Every plane gets its own
     * precomputed seed (RanStarts[k]) so planes are independent streams.
     * NOTE(review): assumes d3 <= MAXDIM and that 2*d1 doubles fit in
     * tmp[] -- confirm against MAXDIM's definition. */
    dcomplex tmp[MAXDIM];
    double x0, start, an, dummy;
    double RanStarts[MAXDIM];
    int i, j, k;
    const double seed = 314159265.0;
    const double a = 1220703125.0;
    start = seed;
    //---------------------------------------------------------------------
    // Jump to the starting element for our first plane.
    //---------------------------------------------------------------------
    an = ipow46(a, 0);               /* a^0 == 1 */
    dummy = randlc(&start, an);
    an = ipow46(a, 2 * d1 * d2);     /* generator stride between planes */
    //---------------------------------------------------------------------
    // Go through by z planes filling in one square at a time.
    //---------------------------------------------------------------------
    RanStarts[0] = start;
    for (k = 1; k < d3; k++)
    {
        dummy = randlc(&start, an);  /* advance seed to the next plane */
        RanStarts[k] = start;
    }
    for (k = 0; k < d3; k++)
    {
        x0 = RanStarts[k];
        for (j = 0; j < d2; j++)
        {
            /* 2*d1 doubles = d1 complex values for this row. */
            vranlc(2 * d1, &x0, a, (double *)tmp);
#pragma omp parallel for default(shared) private(i) firstprivate(d1, k, j, tmp)
            for (i = 0; i < d1; i++)
            {
                u0[k][j][i] = tmp[i];
            }
        }
    }
}
void evolve(int nx, int ny, int nz,
            dcomplex x[nz][ny][nx + 1], dcomplex y[nz][ny][nx + 1],
            double twiddle[nz][ny][nx + 1])
{
    /* Advance the spectrum one time step: scale y in place by the
     * precomputed twiddle factors and mirror the result into x. */
    for (int iz = 0; iz < nz; iz++)
    {
        for (int iy = 0; iy < ny; iy++)
        {
            for (int ix = 0; ix < nx; ix++)
            {
                y[iz][iy][ix] = dcmplx_mul2(y[iz][iy][ix], twiddle[iz][iy][ix]);
                x[iz][iy][ix] = y[iz][iy][ix];
            }
        }
    }
}
//---------------------------------------------------------------------
// Computes NY N-point complex-to-complex FFTs of X using an algorithm due
// to Swarztrauber. X is both the input and the output array, while Y is a
// scratch array. It is assumed that N = 2^M. Before calling
// Swarztrauber to
// perform FFTs
//---------------------------------------------------------------------
void Swarztrauber(int is, int m, int vlen, int n, int xd1,
                  void *ox, dcomplex exponent[n])
{
    /* Perform vlen simultaneous n-point complex FFTs (n == 2^m) over the
     * rows of ox, viewed as an [n][xd1] array, using a Stockham
     * self-sorting formulation.  is >= 1 selects the forward transform,
     * anything else the inverse (conjugated twiddles).  'exponent' is the
     * table produced by CompExp(n, ...).
     * The file-scope array 'scr' is ping-pong scratch: two butterfly
     * levels are fused per outer iteration (l += 2) -- x -> scr on the
     * first, scr -> x on the second.  When m is odd the pass at l == m is
     * the last one, and scr is simply copied back into x. */
    dcomplex (*x)[xd1] = (dcomplex (*)[xd1])ox;   /* reinterpret as [n][xd1] */
    int i, j, l;
    dcomplex u1, x11, x21;
    int k, n1, li, lj, lk, ku, i11, i12, i21, i22;
    //---------------------------------------------------------------------
    // Perform one variant of the Stockham FFT.
    //---------------------------------------------------------------------
    n1 = n / 2;
    lj = 1;
    li = 1 << m;
    for (l = 1; l <= m; l += 2)
    {
        lk = lj;
        lj = 2 * lk;
        li = li / 2;
        ku = li;
        /* First fused level: read x, write scr. */
        for (i = 0; i <= li - 1; i++)
        {
            i11 = i * lk;        /* source strides */
            i12 = i11 + n1;
            i21 = i * lj;        /* destination strides */
            i22 = i21 + lk;
            if (is >= 1)
            {
                u1 = exponent[ku + i];
            }
            else
            {
                u1 = dconjg(exponent[ku + i]);   /* inverse: conjugate twiddle */
            }
            for (k = 0; k <= lk - 1; k++)
            {
#pragma omp parallel for default(shared) private(j, x11, x21) firstprivate(vlen, i11, k, i12, i21, u1, i22, x)
                for (j = 0; j < vlen; j++)
                {
                    x11 = x[i11 + k][j];
                    x21 = x[i12 + k][j];
                    scr[i21 + k][j] = dcmplx_add(x11, x21);
                    scr[i22 + k][j] = dcmplx_mul(u1, dcmplx_sub(x11, x21));
                }
            }
        }
        if (l == m)
        {
            /* Odd m: that was the final level -- copy scr back into x. */
#pragma omp parallel for default(shared) private(k, j) firstprivate(n, vlen, scr)
            for (k = 0; k < n; k++)
            {
                for (j = 0; j < vlen; j++)
                {
                    x[k][j] = scr[k][j];
                }
            }
        }
        else
        {
            /* Second fused level: read scr, write x. */
            lk = lj;
            lj = 2 * lk;
            li = li / 2;
            ku = li;
            for (i = 0; i <= li - 1; i++)
            {
                i11 = i * lk;
                i12 = i11 + n1;
                i21 = i * lj;
                i22 = i21 + lk;
                if (is >= 1)
                {
                    u1 = exponent[ku + i];
                }
                else
                {
                    u1 = dconjg(exponent[ku + i]);
                }
                for (k = 0; k <= lk - 1; k++)
                {
#pragma omp parallel for default(shared) private(j, x11, x21) firstprivate(vlen, i11, k, i12, i21, u1, i22, scr)
                    for (j = 0; j < vlen; j++)
                    {
                        x11 = scr[i11 + k][j];
                        x21 = scr[i12 + k][j];
                        x[i21 + k][j] = dcmplx_add(x11, x21);
                        x[i22 + k][j] = dcmplx_mul(u1, dcmplx_sub(x11, x21));
                    }
                }
            }
        }
    }
}
void fftXYZ(int sign, int n1, int n2, int n3,
            dcomplex x[n3][n2][n1 + 1], dcomplex xout[(n1 + 1)*n2 * n3],
            dcomplex exp1[n1], dcomplex exp2[n2], dcomplex exp3[n3])
{
    /* 3-D FFT of the n1 x n2 x n3 array x, one dimension at a time.
     * sign = +1 for the forward transform, -1 for the inverse.  Passes 1
     * and 2 transform x in place; pass 3 writes its result into the flat
     * array xout.  The global 'plane' buffer gathers strided data into a
     * cache-friendly [len][blkp] layout for Swarztrauber(); 'fftblock'
     * and blkp size those blocks.
     * Fixed: all three block-end clamps used 'ble > n', so a block ending
     * exactly at n escaped clamping and the copy loops ran one row past
     * the array; the tests are now 'ble >= n'.  (Unreachable when
     * fftblock divides n evenly, which is why it went unnoticed.) */
    int i, j, k, log;
    int bls, ble;
    int len;
    int blkp;
    /* ---- pass 1: FFTs along the x (n1) dimension ---- */
    fftblock = CACHESIZE / n1;
    if (fftblock >= BLOCKMAX) fftblock = BLOCKMAX;
    blkp = fftblock + 1;
    log = ilog2(n1);
    for (k = 0; k < n3; k++)
    {
        for (bls = 0; bls < n2; bls += fftblock)
        {
            ble = bls + fftblock - 1;
            if (ble >= n2) ble = n2 - 1;   /* fixed: was 'ble > n2' */
            len = ble - bls + 1;
#pragma omp parallel for default(shared) private(j, i) firstprivate(bls, ble, n1, blkp, k, x)
            for (j = bls; j <= ble; j++)
            {
                for (i = 0; i < n1; i++)
                {
                    plane[j - bls + blkp * i] = x[k][j][i];
                }
            }
            Swarztrauber(sign, log, len, n1, blkp, plane, exp1);
#pragma omp parallel for default(shared) private(j, i) firstprivate(bls, ble, n1, blkp, k, plane)
            for (j = bls; j <= ble; j++)
            {
                for (i = 0; i < n1; i++)
                {
                    x[k][j][i] = plane[j - bls + blkp * i];
                }
            }
        }
    }
    /* ---- pass 2: FFTs along y (n2), done in place on x ---- */
    fftblock = CACHESIZE / n2;
    if (fftblock >= BLOCKMAX) fftblock = BLOCKMAX;
    blkp = fftblock + 1;
    log = ilog2(n2);
    for (k = 0; k < n3; k++)
    {
        for (bls = 0; bls < n1; bls += fftblock)
        {
            ble = bls + fftblock - 1;
            if (ble >= n1) ble = n1 - 1;   /* fixed: was 'ble > n1' */
            len = ble - bls + 1;
            Swarztrauber(sign, log, len, n2, n1 + 1, &x[k][0][bls], exp2);
        }
    }
    /* ---- pass 3: FFTs along z (n3); results land in flat xout ---- */
    fftblock = CACHESIZE / n3;
    if (fftblock >= BLOCKMAX) fftblock = BLOCKMAX;
    blkp = fftblock + 1;
    log = ilog2(n3);
    for (k = 0; k < n2; k++)
    {
        for (bls = 0; bls < n1; bls += fftblock)
        {
            ble = bls + fftblock - 1;
            if (ble >= n1) ble = n1 - 1;   /* fixed: was 'ble > n1' */
            len = ble - bls + 1;
#pragma omp parallel for default(shared) private(i, j) firstprivate(n3, bls, ble, blkp, k, x)
            for (i = 0; i < n3; i++)
            {
                for (j = bls; j <= ble; j++)
                {
                    plane[j - bls + blkp * i] = x[i][k][j];
                }
            }
            Swarztrauber(sign, log, len, n3, blkp, plane, exp3);
#pragma omp parallel for default(shared) private(i, j) firstprivate(n3, bls, ble, n2, n1, k, blkp, plane)
            for (i = 0; i <= n3 - 1; i++)
            {
                for (j = bls; j <= ble; j++)
                {
                    xout[j + (n1 + 1) * (k + n2 * i)] = plane[j - bls + blkp * i];
                }
            }
        }
    }
}
// FT verification routine.
void verify(int n1, int n2, int n3, int nt, dcomplex cksum[nt + 1],
            int *verified)
{
    /* Compare the nt computed checksums (cksum[1..nt]) against the NPB
     * reference values for the class selected by (n1, n2, n3, nt).  Sets
     * *verified to 1 on success, 0 on failure or when the size matches no
     * known class.  A checksum passes when its relative error is within
     * epsilon; the negated '<=' test also fails NaN values. */
    // Local variables.
    int kt;
    dcomplex cexpd[25 + 1];   /* reference checksums, 1-based; 25 = max nt */
    double epsilon, err;
    // Initialize tolerance level and success flag.
    epsilon = 1.0e-12;
    *verified = 1;
    if ((n1 == 64) && (n2 == 64) && (n3 == 64) && (nt == 6))
    {
        // Class S reference values.
        cexpd[1] = dcmplx(554.6087004964, 484.5363331978);
        cexpd[2] = dcmplx(554.6385409189, 486.5304269511);
        cexpd[3] = dcmplx(554.6148406171, 488.3910722336);
        cexpd[4] = dcmplx(554.5423607415, 490.1273169046);
        cexpd[5] = dcmplx(554.4255039624, 491.7475857993);
        cexpd[6] = dcmplx(554.2683411902, 493.2597244941);
    }
    else if ((n1 == 128) && (n2 == 128) && (n3 == 32) && (nt == 6))
    {
        // Class W reference values.
        cexpd[1] = dcmplx(567.3612178944, 529.3246849175);
        cexpd[2] = dcmplx(563.1436885271, 528.2149986629);
        cexpd[3] = dcmplx(559.4024089970, 527.0996558037);
        cexpd[4] = dcmplx(556.0698047020, 526.0027904925);
        cexpd[5] = dcmplx(553.0898991250, 524.9400845633);
        cexpd[6] = dcmplx(550.4159734538, 523.9212247086);
    }
    else if ((n1 == 256) && (n2 == 256) && (n3 == 128) && (nt == 6))
    {
        // Class A reference values.
        cexpd[1] = dcmplx(504.6735008193, 511.4047905510);
        cexpd[2] = dcmplx(505.9412319734, 509.8809666433);
        cexpd[3] = dcmplx(506.9376896287, 509.8144042213);
        cexpd[4] = dcmplx(507.7892868474, 510.1336130759);
        cexpd[5] = dcmplx(508.5233095391, 510.4914655194);
        cexpd[6] = dcmplx(509.1487099959, 510.7917842803);
    }
    else if ((n1 == 512) && (n2 == 256) && (n3 == 256) && (nt == 20))
    {
        // Class B reference values.
        cexpd[1] = dcmplx(517.7643571579, 507.7803458597);
        cexpd[2] = dcmplx(515.4521291263, 508.8249431599);
        cexpd[3] = dcmplx(514.6409228649, 509.6208912659);
        cexpd[4] = dcmplx(514.2378756213, 510.1023387619);
        cexpd[5] = dcmplx(513.9626667737, 510.3976610617);
        cexpd[6] = dcmplx(513.7423460082, 510.5948019802);
        cexpd[7] = dcmplx(513.5547056878, 510.7404165783);
        cexpd[8] = dcmplx(513.3910925466, 510.8576573661);
        cexpd[9] = dcmplx(513.2470705390, 510.9577278523);
        cexpd[10] = dcmplx(513.1197729984, 511.0460304483);
        cexpd[11] = dcmplx(513.0070319283, 511.1252433800);
        cexpd[12] = dcmplx(512.9070537032, 511.1968077718);
        cexpd[13] = dcmplx(512.8182883502, 511.2616233064);
        cexpd[14] = dcmplx(512.7393733383, 511.3203605551);
        cexpd[15] = dcmplx(512.6691062020, 511.3735928093);
        cexpd[16] = dcmplx(512.6064276004, 511.4218460548);
        cexpd[17] = dcmplx(512.5504076570, 511.4656139760);
        cexpd[18] = dcmplx(512.5002331720, 511.5053595966);
        cexpd[19] = dcmplx(512.4551951846, 511.5415130407);
        cexpd[20] = dcmplx(512.4146770029, 511.5744692211);
    }
    else if ((n1 == 512) && (n2 == 512) && (n3 == 512) && (nt == 20))
    {
        // Class C reference values.
        cexpd[1] = dcmplx(519.5078707457, 514.9019699238);
        cexpd[2] = dcmplx(515.5422171134, 512.7578201997);
        cexpd[3] = dcmplx(514.4678022222, 512.2251847514);
        cexpd[4] = dcmplx(514.0150594328, 512.1090289018);
        cexpd[5] = dcmplx(513.7550426810, 512.1143685824);
        cexpd[6] = dcmplx(513.5811056728, 512.1496764568);
        cexpd[7] = dcmplx(513.4569343165, 512.1870921893);
        cexpd[8] = dcmplx(513.3651975661, 512.2193250322);
        cexpd[9] = dcmplx(513.2955192805, 512.2454735794);
        cexpd[10] = dcmplx(513.2410471738, 512.2663649603);
        cexpd[11] = dcmplx(513.1971141679, 512.2830879827);
        cexpd[12] = dcmplx(513.1605205716, 512.2965869718);
        cexpd[13] = dcmplx(513.1290734194, 512.3075927445);
        cexpd[14] = dcmplx(513.1012720314, 512.3166486553);
        cexpd[15] = dcmplx(513.0760908195, 512.3241541685);
        cexpd[16] = dcmplx(513.0528295923, 512.3304037599);
        cexpd[17] = dcmplx(513.0310107773, 512.3356167976);
        cexpd[18] = dcmplx(513.0103090133, 512.3399592211);
        cexpd[19] = dcmplx(512.9905029333, 512.3435588985);
        cexpd[20] = dcmplx(512.9714421109, 512.3465164008);
    }
    else if ((n1 == 2048) && (n2 == 1024) && (n3 == 1024) && (nt == 25))
    {
        // Class D reference values.
        cexpd[1] = dcmplx(512.2230065252, 511.8534037109);
        cexpd[2] = dcmplx(512.0463975765, 511.7061181082);
        cexpd[3] = dcmplx(511.9865766760, 511.7096364601);
        cexpd[4] = dcmplx(511.9518799488, 511.7373863950);
        cexpd[5] = dcmplx(511.9269088223, 511.7680347632);
        cexpd[6] = dcmplx(511.9082416858, 511.7967875532);
        cexpd[7] = dcmplx(511.8943814638, 511.8225281841);
        cexpd[8] = dcmplx(511.8842385057, 511.8451629348);
        cexpd[9] = dcmplx(511.8769435632, 511.8649119387);
        cexpd[10] = dcmplx(511.8718203448, 511.8820803844);
        cexpd[11] = dcmplx(511.8683569061, 511.8969781011);
        cexpd[12] = dcmplx(511.8661708593, 511.9098918835);
        cexpd[13] = dcmplx(511.8649768950, 511.9210777066);
        cexpd[14] = dcmplx(511.8645605626, 511.9307604484);
        cexpd[15] = dcmplx(511.8647586618, 511.9391362671);
        cexpd[16] = dcmplx(511.8654451572, 511.9463757241);
        cexpd[17] = dcmplx(511.8665212451, 511.9526269238);
        cexpd[18] = dcmplx(511.8679083821, 511.9580184108);
        cexpd[19] = dcmplx(511.8695433664, 511.9626617538);
        cexpd[20] = dcmplx(511.8713748264, 511.9666538138);
        cexpd[21] = dcmplx(511.8733606701, 511.9700787219);
        cexpd[22] = dcmplx(511.8754661974, 511.9730095953);
        cexpd[23] = dcmplx(511.8776626738, 511.9755100241);
        cexpd[24] = dcmplx(511.8799262314, 511.9776353561);
        cexpd[25] = dcmplx(511.8822370068, 511.9794338060);
    }
    else if ((n1 == 4096) && (n2 == 2048) && (n3 == 2048) && (nt == 25))
    {
        // Class E reference values.
        cexpd[1] = dcmplx(512.1601045346, 511.7395998266);
        cexpd[2] = dcmplx(512.0905403678, 511.8614716182);
        cexpd[3] = dcmplx(512.0623229306, 511.9074203747);
        cexpd[4] = dcmplx(512.0438418997, 511.9345900733);
        cexpd[5] = dcmplx(512.0311521872, 511.9551325550);
        cexpd[6] = dcmplx(512.0226088809, 511.9720179919);
        cexpd[7] = dcmplx(512.0169296534, 511.9861371665);
        cexpd[8] = dcmplx(512.0131225172, 511.9979364402);
        cexpd[9] = dcmplx(512.0104767108, 512.0077674092);
        cexpd[10] = dcmplx(512.0085127969, 512.0159443121);
        cexpd[11] = dcmplx(512.0069224127, 512.0227453670);
        cexpd[12] = dcmplx(512.0055158164, 512.0284096041);
        cexpd[13] = dcmplx(512.0041820159, 512.0331373793);
        cexpd[14] = dcmplx(512.0028605402, 512.0370938679);
        cexpd[15] = dcmplx(512.0015223011, 512.0404138831);
        cexpd[16] = dcmplx(512.0001570022, 512.0432068837);
        cexpd[17] = dcmplx(511.9987650555, 512.0455615860);
        cexpd[18] = dcmplx(511.9973525091, 512.0475499442);
        cexpd[19] = dcmplx(511.9959279472, 512.0492304629);
        cexpd[20] = dcmplx(511.9945006558, 512.0506508902);
        cexpd[21] = dcmplx(511.9930795911, 512.0518503782);
        cexpd[22] = dcmplx(511.9916728462, 512.0528612016);
        cexpd[23] = dcmplx(511.9902874185, 512.0537101195);
        cexpd[24] = dcmplx(511.9889291565, 512.0544194514);
        cexpd[25] = dcmplx(511.9876028049, 512.0550079284);
    }
    else
    {
        printf(" Verification test for FT not performed\n");
        *verified = 0;
    }
    // Verification test for results.
    if (*verified)
    {
        for (kt = 1; kt <= nt; kt++)
        {
            /* Relative error of the kt-th checksum. */
            err = dcmplx_abs(dcmplx_div(dcmplx_sub(cksum[kt], cexpd[kt]),
                                        cexpd[kt]));
            if (!(err <= epsilon))   /* negated form also catches NaN */
            {
                *verified = 0;
                break;
            }
        }
        if (*verified)
        {
            printf(" Verification test for FT successful\n");
        }
        else
        {
            printf(" Verification test for FT failed\n");
        }
    }
}
void print_results(char *name, char class, int n1, int n2, int n3, int niter,
                   double t, double mops, char *optype, int verified)
{
    /* Print the standard NPB results banner: benchmark name, class, size,
     * iteration count, wall time, Mop/s, operation type, and whether
     * verification succeeded.
     * Fixed: the EP size string was built with an unbounded sprintf into a
     * 16-byte buffer; snprintf now bounds the write (a >15-digit value
     * previously overflowed 'size'). */
    char size[16];
    int j;
    printf( "\n\n %s Benchmark Completed.\n", name );
    printf( " Class = %12c\n", class );
    // If this is not a grid-based problem (EP, FT, CG), then
    // we only print n1, which contains some measure of the
    // problem size. In that case, n2 and n3 are both zero.
    // Otherwise, we print the grid size n1xn2xn3
    if ( ( n2 == 0 ) && ( n3 == 0 ) )
    {
        if ( ( name[0] == 'E' ) && ( name[1] == 'P' ) )
        {
            /* EP reports 2^n1 samples; strip a trailing '.' if present. */
            snprintf( size, sizeof size, "%15.0lf", pow(2.0, n1) );
            j = 14;
            if ( size[j] == '.' )
            {
                size[j] = ' ';
                j--;
            }
            size[j + 1] = '\0';
            printf( " Size = %15s\n", size );
        }
        else
        {
            printf( " Size = %12d\n", n1 );
        }
    }
    else
    {
        printf( " Size = %4dx%4dx%4d\n", n1, n2, n3 );
    }
    printf( " Iterations = %12d\n", niter );
    printf( " Time in seconds = %12.2lf\n", t );
    printf( " Mop/s total = %15.2lf\n", mops );
    printf( " Operation type = %24s\n", optype );
    if ( verified )
        printf( " Verification = %12s\n", "SUCCESSFUL" );
    else
        printf( " Verification = %12s\n", "UNSUCCESSFUL" );
}
double randlc( double *x, double a )
{
    /*
     * NPB linear congruential generator:
     *     x_{k+1} = a * x_k  (mod 2^46)
     * Updates *x to the new seed and returns 2^-46 * x_{k+1}, a value in
     * (0, 1).  Both a and *x must be odd integers in (1, 2^46).  The
     * multiply is carried out exactly in 23-bit halves, so results are
     * identical on any machine whose double has >= 48 mantissa bits.
     * (David H. Bailey, October 26, 1990.)
     */
    const double r23 = 1.1920928955078125e-07;   /* 2^-23 */
    const double r46 = r23 * r23;                /* 2^-46 */
    const double t23 = 8.388608e+06;             /* 2^23  */
    const double t46 = t23 * t23;                /* 2^46  */
    double aHi, aLo, xHi, xLo, mid, midTrunc, full, fullTrunc;

    /* Split a = 2^23 * aHi + aLo. */
    aHi = (int) (r23 * a);
    aLo = a - t23 * aHi;

    /* Split x the same way, form the cross terms mod 2^23, then assemble
     * the full product mod 2^46. */
    xHi = (int) (r23 * (*x));
    xLo = *x - t23 * xHi;
    mid = aHi * xLo + aLo * xHi;
    midTrunc = (int) (r23 * mid);
    full = t23 * (mid - t23 * midTrunc) + aLo * xLo;
    fullTrunc = (int) (r46 * full);
    *x = full - t46 * fullTrunc;
    return r46 * (*x);
}
void vranlc( int n, double *x, double a, double y[] )
{
    /*
     * Vector form of randlc(): write n pseudorandom doubles in (0, 1)
     * into y[] using x_{k+1} = a * x_k (mod 2^46), updating *x to the
     * final seed so subsequent calls continue the same sequence.  Both a
     * and *x must be odd integers in (1, 2^46); n == 0 is a no-op.
     * Exact on any machine with >= 48 mantissa bits in double.
     */
    const double r23 = 1.1920928955078125e-07;   /* 2^-23 */
    const double r46 = r23 * r23;                /* 2^-46 */
    const double t23 = 8.388608e+06;             /* 2^23  */
    const double t46 = t23 * t23;                /* 2^46  */
    double aHi, aLo, xHi, xLo, mid, midTrunc, full, fullTrunc;
    int i;

    /* Split a = 2^23 * aHi + aLo once, outside the loop. */
    aHi = (int) (r23 * a);
    aLo = a - t23 * aHi;

    /* Generate n results; each step is one exact mod-2^46 multiply. */
    for ( i = 0; i < n; i++ )
    {
        xHi = (int) (r23 * (*x));
        xLo = *x - t23 * xHi;
        mid = aHi * xLo + aLo * xHi;
        midTrunc = (int) (r23 * mid);
        full = t23 * (mid - t23 * midTrunc) + aLo * xLo;
        fullTrunc = (int) (r46 * full);
        *x = full - t46 * fullTrunc;
        y[i] = r46 * (*x);
    }
}
void wtime(double *t)
{
    /* Wall-clock time in seconds, relative to the first call (subtracting
     * a recent epoch keeps the double result precise).
     * Fixed: the cached epoch was a 'static int', silently truncating
     * time_t (and overflowing after 2038 on 32-bit int); it is now a
     * time_t. */
    static time_t sec = -1;
    struct timeval tv;
    gettimeofday(&tv, (void *)0);
    if (sec < 0) sec = tv.tv_sec;
    *t = (tv.tv_sec - sec) + 1.0e-6 * tv.tv_usec;
}
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
double elapsed_time( void )
{
    /* Current wall-clock reading (seconds since the first wtime() call). */
    double now;
    wtime( &now );
    return now;
}
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
/* Reset the accumulated time of timer slot n (0 <= n < 64). */
void timer_clear( int n )
{
    elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
/* Record the current wall-clock time as the start of timer slot n. */
void timer_start( int n )
{
    start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
void timer_stop( int n )
{
    /* Add the interval since the matching timer_start(n) to the slot's
     * accumulated total. */
    double finish = elapsed_time();
    double delta = finish - start[n];
    elapsed[n] += delta;
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
/* Return the total time accumulated in timer slot n (seconds). */
double timer_read( int n )
{
    return ( elapsed[n] );
}
|
PhysicalSystemFEM.h | //Design Notes:
//Physical System:
//MultiVector<ElementTypes>
//MultiVector<DOFType>
//Store DOFs in contiguous memory
//Each element
// Quadrature
// Energy(ShapeFunction, Energy, position) -> Energy(ShapeFunction(position));
// ShapeFunction + Kinematics (DOF + gradients I might need ....)
// Gradient
// Hessian (maybe, I'm not sure)
// How to represent DOFS ? Pointers to list from Physical System ? Has to be, since elements like forces are connections
// Energy
// //Gradient
// //Hessian
//How should lists of DOFS work ?
// Container<DOFs>
#ifndef PHYSICALSYSTEMFEM_H
#define PHYSICALSYSTEMFEM_H
#include <vector>
#include <DOFParticle.h>
#include <DOFList.h>
#include <UtilitiesEigen.h>
namespace Gauss {
namespace FEM {
template<typename DataType, typename ElementType>
class PhysicalSystemFEMImpl
{
public:
    // Finite-element physical system over a mesh (V, F).  Owns one
    // heap-allocated ElementType per mesh element, plus the global
    // position (m_q) and velocity (m_qDot) DOF lists that the elements
    // hold pointers into.
    // NOTE(review): elements are raw owned pointers; copying this class
    // would double-delete them -- confirm whether copy/assignment should
    // be deleted.

    //temporary global indices until I update the state to give these to me
    //automatically
    // V: #verts x 3 vertex positions; F: #elements x numDOFs connectivity.
    PhysicalSystemFEMImpl(const Eigen::Ref<Eigen::MatrixXd > &V, const Eigen::Ref<Eigen::MatrixXi> &F) : m_q(V.rows()), m_qDot(V.rows()) {
        m_V = V.template cast<DataType>();
        m_F = F;
        m_numVerts = m_V.rows();
        m_numElements = m_F.rows();
        assert(m_V.cols() == 3); //3D only for now
        //initialize all the elements
        Eigen::MatrixXi Felement;
        std::array<DOFBase<DataType,0> *, ElementType::numDOFs()> qDOFArray;
        std::array<DOFBase<DataType,1> *, ElementType::numDOFs()> qDotDOFArray;
        for(unsigned int iel=0; iel < m_numElements; iel++) {
            // Wire each element to the DOFs of the vertices it touches.
            for(unsigned int idof=0;idof < ElementType::numDOFs(); ++idof) {
                qDOFArray[idof] = &m_q[F(iel,idof)];
                qDotDOFArray[idof] = &m_qDot[F(iel,idof)];
            }
            Felement = m_F.row(iel);
            m_elements.push_back(
                new ElementType(m_V,Felement, qDOFArray, qDotDOFArray)
            );
        }
    }

    // Fixed: the destructor previously did nothing, leaking every element
    // allocated with 'new' in the constructor.
    ~PhysicalSystemFEMImpl() {
        for(auto element : m_elements) {
            delete element;
        }
    }

    // Total energy: sum over all elements.
    // Fixed: accumulated into 'double' rather than DataType, truncating
    // for non-double DataType; now consistent with getStrainEnergy().
    DataType getEnergy(const State<DataType> &state) const {
        DataType energy = 0.0;
        for(auto &element : m_elements) {
            energy += element->getEnergy(state);
        }
        return energy;
    }

    // Total kinetic energy (same DataType fix as getEnergy above).
    DataType getKineticEnergy(const State<DataType> &state) const {
        DataType energy = 0.0;
        for(auto &element : m_elements) {
            energy += element->getKineticEnergy(state);
        }
        return energy;
    }

    // Work done by body forces, summed over elements (OpenMP reduction on
    // non-Windows builds, where the pragma is supported by the toolchain).
    DataType getBodyForceEnergy(const State<DataType> &state) const {
        DataType energy = 0.0;
#if defined(_WIN32) || defined(_WIN64) || defined (WIN32)
        for(auto &element : m_elements) {
            energy += element->getBodyForceWork(state);
        }
#else
#pragma omp parallel for reduction(+: energy)
        for(unsigned int ii=0; ii<m_elements.size(); ++ii) {
            energy = energy + m_elements[ii]->getBodyForceWork(state);
        }
#endif
        return energy;
    }

    // Total strain energy, summed over elements (parallel off-Windows).
    DataType getStrainEnergy(const State<DataType> &state) const {
        DataType energy = 0.0;
#if defined(_WIN32) || defined(_WIN64) || defined (WIN32)
        for(auto &element : m_elements) {
            energy += element->getStrainEnergy(state);
        }
#else
#pragma omp parallel for reduction(+: energy)
        for(unsigned int ii=0; ii<m_elements.size(); ++ii) {
            energy = energy + m_elements[ii]->getStrainEnergy(state);
        }
#endif
        return energy;
    }

    // Per-element strain energies as a vector (one entry per element).
    decltype(auto) getStrainEnergyPerElement(const State<DataType> &state) const {
        Eigen::VectorXx<DataType> energyPerElement(m_elements.size());
        for(int i=0; i < m_elements.size(); i++) {
            energyPerElement[i] = m_elements[i]->getStrainEnergy(state);
        }
        return energyPerElement;
    }

    // Assemble the global mass matrix (parallel when Assembler allows it).
    template<typename Assembler>
    inline void getMassMatrix(Assembler &assembler, const State<DataType> &state) const {
        //call the assembler on all elements
        forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) {
            element->getMassMatrix(assemble,state);
        });
    }

    // Assemble the global stiffness matrix.
    template<typename Assembler>
    inline void getStiffnessMatrix(Assembler &assembler, const State<DataType> &state) const {
        forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) {
            element->getStiffnessMatrix(assemble, state);
        });
    }

    // Assemble the total force vector (internal + body forces).
    template<typename Assembler>
    inline void getForce(Assembler &assembler, const State<DataType> &state) const {
        forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) {
            element->getForce(assemble, state);
        });
    }

    // Assemble internal (elastic) forces only.
    template<typename Assembler>
    inline void getInternalForce(Assembler &assembler, const State<DataType> &state) const {
        forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) {
            element->getInternalForce(assemble, state);
        });
    }

    // Assemble body forces (e.g. gravity) only.
    template<typename Assembler>
    inline void getBodyForce(Assembler &assembler, const State<DataType> &state) const {
        forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) {
            element->getBodyForce(assemble, state);
        });
    }

    inline unsigned int getNumElements() { return m_elements.size(); }

    inline ElementType * getElement(unsigned int i) {
        assert(i < m_elements.size());
        return m_elements[i];
    }

    inline std::vector<ElementType *> & getElements() { return m_elements; }
    inline const std::vector<ElementType *> & getElements() const { return m_elements; }

    inline const ElementType * getElement(unsigned int i) const {
        assert(i < m_elements.size());
        return m_elements[i];
    }

    inline auto & getQ() { return m_q; }
    inline const auto & getQ() const { return m_q; }
    inline auto & getQDot() { return m_qDot; }
    inline const auto & getQDot() const { return m_qDot; }

    //get function supporting a vertex (these return arrays in order to slot directly into assemblers)
    inline decltype(auto) getQ(unsigned int vertexId) const {
        std::array<const DOFBase<DataType,0> *,1> toReturn = {{&m_q[vertexId]}};
        return toReturn;
    }

    inline decltype(auto) getQDot(unsigned int vertexId) const {
        std::array<const DOFBase<DataType,1> *,1> toReturn = {{&m_qDot[vertexId]}};
        return toReturn;
    }

    // Per-element spatial DOF lookup -- not implemented yet; aborts.
    template<typename Vector>
    inline decltype(auto) getQ(Vector &x, unsigned int elementId) const {
        std::cout<<"Error not implemented \n";
        exit(0);
        std::array<const DOFBase<DataType,0> *, 1> toReturn = {{&m_q[elementId]}};
        return toReturn;
    }

    // Per-element spatial DOF-velocity lookup -- not implemented yet; aborts.
    template<typename Vector>
    inline decltype(auto) getQDot(Vector &x, unsigned int elementId) const {
        std::cout<<"Error not implemented \n";
        exit(0);
        std::array<const DOFBase<DataType,1> *,1> toReturn = {{&m_qDot[elementId]}};
        return toReturn;
    }

    inline auto & getV() { return m_V; }
    inline auto & getF() { return m_F; }
    inline const auto & getV() const { return m_V; }
    inline const auto & getF() const { return m_F; }

    //methods for getting current positions and position Jacobians for this system
    //Per-Vertex
    // Deformed position = rest position + current displacement DOF.
    inline const auto getPosition(const State<DataType> &state, unsigned int vertexId) const {
        return getV().row(vertexId).transpose() + mapDOFEigen(m_q[vertexId], state);
    }

    inline const auto getVelocity(const State<DataType> &state, unsigned int vertexId) const {
        return mapDOFEigen(m_qDot[vertexId], state);
    }

    // d(position)/d(DOF) at a vertex: identity (DOFs are displacements).
    inline const auto getDPDQ(const State<DataType> &state, unsigned int vertexId) const {
        return Eigen::Matrix33x<DataType>::Identity();
    }

    // Element-space variant -- not implemented yet; aborts.
    inline const auto getDPDQ(const State<DataType> &state, unsigned int elementId, const Eigen::Vector3x<DataType> &pos) const {
        exit(0);
        return Eigen::Matrix33x<DataType>::Identity();
    }

    //want these for elements as well (i.e take an element indec and a point in space and return the right value)
    inline auto getGeometry() { return std::make_pair(std::ref(m_V), std::ref(m_F)); }
    inline const auto getGeometry() const { return std::make_pair(std::ref(m_V), std::ref(m_F)); }

protected:
    //Mesh
    Eigen::MatrixXx<DataType> m_V;   // rest-state vertex positions
    Eigen::MatrixXi m_F;             // element-vertex connectivity
    long m_numVerts;
    long m_numElements;
    DOFList<DataType, DOFParticle, 0> m_q;     // per-vertex displacement DOFs
    DOFList<DataType, DOFParticle, 1> m_qDot;  // per-vertex velocity DOFs
    std::vector<ElementType *> m_elements;     // owned; freed in destructor
    //DataType m_mass; //mass of particle
    //DOFParticle<DataType,0> m_x;
    //DOFParticle<DataType,1> m_xDot;
private:
};
template<typename DataType, template <typename A> class ElementType>
using PhysicalSystemFEM = PhysicalSystem<DataType, PhysicalSystemFEMImpl<DataType, ElementType<DataType> > >;
}
}
#endif
|
ProgressBar.h | /**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#pragma once
#include "saiga/config.h"
#include "saiga/core/util/Thread/threadName.h"
#include "saiga/core/util/assert.h"
#include <atomic>
#include <iostream>
#include <mutex>
#include <string>
namespace Saiga
{
/**
* A synchronized progress bar for console output.
* You must not write to the given stream while the progress bar is active.
*
* Usage Parallel Image loading:
*
* SyncedConsoleProgressBar loadingBar(std::cout, "Loading " + to_string(N) + " images ", N);
* #pragma omp parallel for
* for (int i = 0; i < N; ++i)
* {
* images[i].load("...");
* loadingBar.addProgress(1);
* }
*
*/
struct SyncedConsoleProgressBar
{
    // Console progress bar driven by a background thread that redraws
    // every 100 ms.  addProgress() is safe to call from multiple threads
    // ('current' is atomic); the stream itself is written only by the
    // worker thread (plus one initial draw in the constructor, before the
    // worker starts).
    SyncedConsoleProgressBar(std::ostream& strm, const std::string header, int end, int length = 30)
        : strm(strm), header(header), end(end), length(length)
    {
        SAIGA_ASSERT(end >= 0);
        print();
        run();
    }
    ~SyncedConsoleProgressBar()
    {
        // Signal the worker to finish; its final print() and "Done." run
        // before join() returns.
        running = false;
        st.join();
    }
    void addProgress(int i) { current += i; }

private:
    std::ostream& strm;
    ScopedThread st;
    std::string header;
    std::atomic_bool running = true;
    std::atomic_int current  = 0;
    int end;      // total work units (>= 0)
    int length;   // bar width in characters

    // Start the redraw thread; exits once running is cleared or the bar fills.
    void run()
    {
        st = ScopedThread([this]() {
            while (running && current.load() < end)
            {
                print();
                std::this_thread::sleep_for(std::chrono::milliseconds(100));
            }
            print();
            strm << "Done." << std::endl;
        });
    }

    // Redraw the bar in place via '\r'.
    void print()
    {
        SAIGA_ASSERT(current <= end);
        strm << "\r" << header << " [";
        // Fixed: end == 0 passes the constructor assert but previously
        // divided by zero here, making 'progress' NaN and the NaN->int
        // conversion below undefined behavior.  Treat empty work as done.
        auto progress = (end > 0) ? double(current) / end : 1.0;
        int barLength = progress * length;
        for (auto i = 0; i < barLength; ++i)
        {
            strm << "=";
        }
        for (auto i = barLength; i < length; ++i) strm << " ";
        strm << "] " << progress * 100 << "% " << std::flush;
    }
};
} // namespace Saiga
|
NGmerge.c | /*
John M. Gaspar (jsh58@wildcats.unh.edu)
April 2015 (updated 2016, 2017)
Analyzing paired-end reads for overlaps. Two modes:
- 'stitch': producing a single, merged read for reads
with sufficient overlaps
- 'adapter-removal': removing adapters (3' overhangs
of stitched alignment) from individual reads
Version 0.3
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <getopt.h>
#include <zlib.h>
#include <omp.h>
#include "NGmerge.h"
/* void printVersion()
 * Print version and copyright to stderr, then terminate.
 */
void printVersion(void) {
  fprintf(stderr,
    "NGmerge, version %s\n"
    "Copyright (C) 2017 John M. Gaspar (jsh58@wildcats.unh.edu)\n",
    VERSION);
  exit(-1);
}
/* void usage()
 * Prints usage information (required arguments, alignment
 * parameters, and I/O options) to stderr, then exits with
 * the given value.
 */
void usage(int exitval) {
  fprintf(stderr, "Usage: NGmerge {-%c <file> -%c <file>", FIRST, SECOND);
  fprintf(stderr, " -%c <file>} [optional arguments]\n", OUTFILE);
  fprintf(stderr, "Required arguments:\n");
  fprintf(stderr, " -%c <file> Input FASTQ file with reads from forward direction\n", FIRST);
  fprintf(stderr, " -%c <file> Input FASTQ file with reads from reverse direction\n", SECOND);
  fprintf(stderr, " -%c <file> Output FASTQ file(s):\n", OUTFILE);
  fprintf(stderr, " - in 'stitch' mode (def.), the file of merged reads\n");
  fprintf(stderr, " - in 'adapter-removal' mode (-%c), the output files\n", ADAPTOPT);
  fprintf(stderr, " will be <file>%s and <file>%s\n", ONEEXT, TWOEXT);
  fprintf(stderr, "Alignment parameters:\n");
  fprintf(stderr, " -%c <int> Minimum overlap of the paired-end reads (def. %d)\n", OVERLAP, DEFOVER);
  fprintf(stderr, " -%c <float> Mismatches to allow in the overlapped region\n", MISMATCH);
  fprintf(stderr, " (a fraction of the overlap length; def. %.2f)\n", DEFMISM);
  fprintf(stderr, " -%c Use 'adapter-removal' mode (also sets -%c option)\n", ADAPTOPT, DOVEOPT);
  fprintf(stderr, " -%c Option to check for dovetailing (with 3' overhangs)\n", DOVEOPT);
  fprintf(stderr, " -%c <int> Minimum overlap of dovetailed alignments (def. %d)\n", DOVEOVER, DEFDOVE);
  fprintf(stderr, " -%c Option to produce shortest stitched read\n", MAXOPT);
  fprintf(stderr, "I/O options:\n");
  fprintf(stderr, " -%c <file> Log file for stitching results of each read pair\n", LOGFILE);
  fprintf(stderr, " -%c <file> FASTQ files for reads that failed stitching\n", UNFILE);
  fprintf(stderr, " (output as <file>%s and <file>%s)\n", ONEEXT, TWOEXT);
  fprintf(stderr, " -%c <file> Log file for dovetailed reads (adapter sequences)\n", DOVEFILE);
  fprintf(stderr, " -%c <file> Log file for formatted alignments of merged reads\n", ALNFILE);
  fprintf(stderr, " -%c/-%c Option to gzip (-%c) or not (-%c) FASTQ output(s)\n", GZOPT, UNGZOPT, GZOPT, UNGZOPT);
  fprintf(stderr, " -%c Option to produce interleaved FASTQ output(s)\n", INTEROPT);
  fprintf(stderr, " -%c <file> Use given error profile for merged qual scores\n", QUALFILE);
  fprintf(stderr, " -%c Use 'fastq-join' method for merged qual scores\n", FJOINOPT);
  fprintf(stderr, " -%c <int> FASTQ quality offset (def. %d)\n", QUALITY, OFFSET);
  fprintf(stderr, " -%c <int> Maximum input quality score (0-based; def. %d)\n", SETQUAL, MAXQUAL);
  fprintf(stderr, " -%c <int> Number of threads to use (def. %d)\n", THREADS, DEFTHR);
  exit(exitval);
}
/* int error()
 * Prints an error message to stderr: the given string plus
 * a fixed suffix from the errMsg table.
 * Always returns -1, so callers can write exit(error(...)).
 */
int error(char* msg, enum errCode err) {
  fprintf(stderr, "Error! %s%s\n", msg, errMsg[err]);
  return -1;
}
/* void* memalloc()
 * Allocates a heap block of the given size.
 * Exits the program on allocation failure, so the
 * returned pointer is never NULL.
 */
void* memalloc(int size) {
  void* ans = malloc(size);
  if (ans == NULL)
    exit(error("", ERRMEM));
  return ans;
}
/* float getFloat(char*)
 * Converts the given char* to a float.
 * Exits with an error if the string is empty or contains
 * any trailing non-numeric characters.
 */
float getFloat(char* in) {
  char* endptr;
  float ans = strtof(in, &endptr);
  // endptr == in means no conversion happened (e.g. empty string),
  // which previously slipped through and silently returned 0.0
  if (endptr == in || *endptr != '\0')
    exit(error(in, ERRFLOAT));
  return ans;
}
/* int getInt(char*)
 * Converts the given char* to an int.
 * Exits with an error if the string is empty or contains
 * any trailing non-numeric characters.
 */
int getInt(char* in) {
  char* endptr;
  int ans = (int) strtol(in, &endptr, 10);
  // endptr == in means no conversion happened (e.g. empty string),
  // which previously slipped through and silently returned 0
  if (endptr == in || *endptr != '\0')
    exit(error(in, ERRINT));
  return ans;
}
/* char rc(char)
 * Returns the complement of the given base.
 * Exits with an error for any character other than A/C/G/T/N.
 */
char rc(char in) {
  switch (in) {
    case 'A': return 'T';
    case 'T': return 'A';
    case 'C': return 'G';
    case 'G': return 'C';
    case 'N': return 'N';
    default: break;
  }
  // unknown base: report it in quotes and bail out
  char msg[4] = "' '";
  msg[1] = in;
  exit(error(msg, ERRUNK));
  return '\0'; // not reached
}
/* char* getLine()
 * Reads the next line from a (possibly gzip-compressed) file.
 * Returns NULL at EOF, like fgets().
 */
char* getLine(char* line, int size, File in, bool gz) {
  return gz ? gzgets(in.gzf, line, size)
    : fgets(line, size, in.f);
}
/* void checkHeaders()
 * Ensure headers match (up to first space character);
 * create consensus header (written into 'header', without
 * trailing space).
 */
void checkHeaders(char* head1, char* head2, char* header) {
  bool ok = false; // set once a space has been matched
  int j;
  for (j = 0; head1[j] != '\n' && head1[j] != '\0'; j++) {
    if (head1[j] != head2[j]) {
      if (ok)
        break; // headers already matched through a space
      // mismatch before any space: fatal error
      for ( ; head1[j] != '\n' && head1[j] != '\0'
        && head1[j] != ' '; j++) ;
      head1[j] = '\0'; // trim head1 for err msg
      exit(error(head1, ERRHEAD));
    } else if (head1[j] == ' ')
      ok = true; // headers match
    header[j] = head1[j];
  }
  // terminate the consensus header; the j > 0 guard prevents an
  // out-of-bounds header[-1] access on a degenerate empty header
  if (j > 0 && header[j - 1] == ' ')
    header[j - 1] = '\0'; // removing trailing space
  else
    header[j] = '\0';
}
/* void checkQual()
 * Check given quality string for offset errors: every score
 * must lie in [offset, offset + maxQual]. Exits on the first
 * out-of-range character.
 */
void checkQual(char* qual, int len, int offset,
    int maxQual) {
  for (int i = 0; i < len; i++)
    // error if qual < 0 or qual > maxQual
    if (qual[i] < offset || qual[i] > offset + maxQual) {
      char* msg = (char*) memalloc(MAX_SIZE);
      // bounded write (snprintf) -- cannot overrun msg
      snprintf(msg, MAX_SIZE, "(range [0, %d], offset %d) '%c'",
        maxQual, offset, qual[i]);
      exit(error(msg, ERROFFSET));
    }
}
/* void processSeq()
 * Process one line (SEQ or QUAL, selected by j) of a read:
 * trim the newline and record the length; for the 2nd read
 * (i == true), also save revComp(seq) or rev(qual) into the
 * read's 'extra' fields for the alignment step.
 */
void processSeq(char** read, int* len, bool i,
    int j, int offset, int maxQual) {
  // remove new-line character and save length
  int k;
  for (k = 0; read[j][k] != '\n' && read[j][k] != '\0'; k++) ;
  read[j][k] = '\0';
  if (j == SEQ)
    *len = k; // save read length
  else if (k != *len)
    exit(error("", ERRQUAL)); // seq/qual length mismatch
  // for 2nd read (i == true), save revComp(seq) or rev(qual)
  if (i) {
    int dest = j + EXTRA; // save to 'extra' field of read2
    int m = 0;
    if (j == SEQ) {
      dest++; // increment b/c of fastq 'plus' line
      for (k--; k > -1; k--)
        read[dest][m++] = rc(read[j][k]); // reverse-complement
    } else
      for (k--; k > -1; k--)
        read[dest][m++] = read[j][k]; // reverse (qual scores)
    read[dest][m] = '\0';
  } else if (j == SEQ)
    // check 1st read's sequence for non-ACGTN chars
    // (rc() exits on an unknown base; its result is discarded)
    for (int m = 0; m < k; m++)
      rc(read[j][m]);
  // check quality scores
  if (j == QUAL)
    checkQual(read[j], k, offset, maxQual);
}
/* bool loadReads()
 * Load a pair of reads (4 fastq lines each). Check formatting,
 * determine consensus header. Return false on EOF.
 * The file reads are serialized in an OpenMP critical section
 * so multiple threads can share the same input streams.
 */
bool loadReads(File in1, File in2, char** read1, char** read2,
    char* header, int* len1, int* len2, int offset,
    int maxQual, bool gz1, bool gz2) {
  // load both reads from input files (LOCK)
  bool flag = false; // boolean for EOF
  #pragma omp critical
  for (int i = 0; i < 2; i++) {
    File in = in1;
    char** read = read1;
    bool gz = gz1;
    if (i) {
      in = in2;
      read = read2;
      gz = gz2;
    }
    // load read (4 lines)
    for (int j = 0; j < FASTQ; j++)
      if (getLine(read[j], MAX_SIZE, in, gz) == NULL) {
        if (j == 0) {
          if (i == 0) {
            flag = true; // EOF
            break;
          } else {
            // EOF on 2nd input only: read1 has no mate
            int k = 0;
            for ( ; read1[HEAD][k] != '\n' && read1[HEAD][k] != '\0'
              && read1[HEAD][k] != ' '; k++) ;
            read1[HEAD][k] = '\0'; // trim header for err msg
            exit(error(read1[HEAD], ERRHEAD));
          }
        } else
          exit(error("", ERRSEQ)); // record cut off mid-read
      }
    if (flag)
      break;
  } // (UNLOCK)
  if (flag)
    return false; // EOF
  // check fastq formatting
  if (read1[HEAD][0] != BEGIN || read1[PLUS][0] != PLUSCHAR
      || read2[HEAD][0] != BEGIN || read2[PLUS][0] != PLUSCHAR)
    exit(error("", ERRFASTQ));
  // process sequence/quality lines
  processSeq(read1, len1, false, SEQ, offset, maxQual);
  processSeq(read1, len1, false, QUAL, offset, maxQual);
  processSeq(read2, len2, true, SEQ, offset, maxQual);
  processSeq(read2, len2, true, QUAL, offset, maxQual);
  // check headers
  checkHeaders(read1[HEAD], read2[HEAD], header);
  return true;
}
/* float compare()
 * Compare two sequences over the given length. Positions with
 * an 'N' in either sequence are excluded from the effective
 * overlap length. Return the fraction mismatch, or NOTMATCH
 * if the allowance is exceeded or the effective overlap drops
 * below the minimum.
 */
float compare(char* seq1, char* seq2, int length,
    float mismatch, int overlap) {
  int diffs = 0;       // number of mismatches seen so far
  int effLen = length; // length of overlap, not counting Ns
  float allowed = effLen * mismatch;
  for (int i = 0; i < length; i++) {
    if (seq1[i] == 'N' || seq2[i] == 'N') {
      // shrink the effective overlap; fail if it falls below
      // the minimum or the mismatch count is now over budget
      if (--effLen < overlap || diffs > effLen * mismatch)
        return NOTMATCH;
      allowed = effLen * mismatch;
    } else if (seq1[i] != seq2[i] && ++diffs > allowed)
      return NOTMATCH;
  }
  return (float) diffs / effLen;
}
/* int findPos()
 * Find optimal overlapping position of the two reads:
 * try all regular (non-dovetailed) alignments first, then
 * (optionally) dovetailed ones, keeping the position with the
 * lowest mismatch fraction (returned via *best).
 * A return value of len1 - overlap + 1 means no alignment.
 * Currently, quality scores are not considered
 * (e.g. decreased penalty for a low-quality mismatch).
 */
int findPos (char* seq1, char* seq2, char* qual1,
    char* qual2, int len1, int len2, int overlap,
    bool dovetail, int doveOverlap, float mismatch,
    bool maxLen, float* best) {
  // check for regular (non-dovetailed) alignments
  int pos = len1 - overlap + 1; // position of match (init: failure)
  int i = len1 - overlap;
  for ( ; i > -1 && len1 - i <= len2; i--) {
    // align sequences
    float res = compare(seq1 + i, seq2, len1 - i,
      mismatch, overlap);
    // compare result
    if (res < *best || (res == *best && !maxLen)) {
      *best = res;
      pos = i;
    }
    if (res == 0.0f && maxLen)
      return pos; // shortcut for exact match
  }
  // check for dovetailing
  if (dovetail) {
    // if no regular alignment, reset i
    if (i == len1 - overlap)
      i = (len1 > len2 ? len1 - len2 - 1 : -1);
    // continue decrementing i (negative i shifts read2 left of read1)
    for ( ; ; i--) {
      float res = NOTMATCH;
      if (i >= 0) {
        // read1 is longer, with 3' overhang
        if (len2 < doveOverlap)
          break;
        res = compare(seq1 + i, seq2, len2,
          mismatch, doveOverlap);
      } else if (len1 < len2 + i) {
        // read2 has 3' overhang, read1 determines overlap
        if (len1 < doveOverlap)
          break;
        res = compare(seq1, seq2 - i, len1,
          mismatch, doveOverlap);
      } else {
        // read2 has 3' overhang and determines overlap
        if (len2 + i < doveOverlap)
          break;
        res = compare(seq1, seq2 - i, len2 + i,
          mismatch, doveOverlap);
      }
      // compare result
      if (res < *best || (res == *best && !maxLen)) {
        *best = res;
        pos = i;
      }
      if (res == 0.0f && maxLen)
        return pos; // shortcut for exact match
    }
  }
  return pos;
}
/* void printDove()
 * Log 3' overhangs (adapter sequences) of dovetailed reads.
 * Writes are serialized with the given omp lock.
 */
void printDove(File dove, char* header, char** read1,
    char** read2, int len1, int len2, int pos,
    omp_lock_t* lock) {
  if (len1 > len2 + pos || pos < 0) {
    omp_set_lock(lock);
    // columns: read name, R1 overhang (or '-'), R2 overhang (or '-')
    fprintf(dove.f, "%s\t%s\t%s\n", header + 1,
      len1 > len2 + pos ? read1[SEQ] + len2 + pos : "-",
      pos < 0 ? read2[SEQ] + len2 + pos : "-");
    omp_unset_lock(lock);
  }
}
/* static void writeGZClipped()
 * Helper: write one fastq read to a gzip stream, truncating
 * the sequence and quality lines to the first 'end' chars.
 */
static void writeGZClipped(gzFile out, char** read, int end) {
  gzprintf(out, "%s", read[HEAD]);
  for (int i = 0; i < end; i++)
    gzputc(out, read[SEQ][i]);
  gzprintf(out, "\n%s", read[PLUS]);
  for (int i = 0; i < end; i++)
    gzputc(out, read[QUAL][i]);
  gzputc(out, '\n');
}
/* void printGZNoAdapt()
 * Print the reads minus adapters (gzip output).
 */
void printGZNoAdapt(gzFile out1, gzFile out2,
    char** read1, char** read2, int end1, int end2) {
  writeGZClipped(out1, read1, end1); // fwd read
  writeGZClipped(out2, read2, end2); // rev read
}
/* static void writeClipped()
 * Helper: write one fastq read, truncating the sequence and
 * quality lines to the first 'end' characters.
 */
static void writeClipped(FILE* out, char** read, int end) {
  fprintf(out, "%s", read[HEAD]);
  for (int i = 0; i < end; i++)
    fputc(read[SEQ][i], out);
  fprintf(out, "\n%s", read[PLUS]);
  for (int i = 0; i < end; i++)
    fputc(read[QUAL][i], out);
  fputc('\n', out);
}
/* void printNoAdapt()
 * Print the reads minus adapters.
 */
void printNoAdapt(FILE* out1, FILE* out2, char** read1,
    char** read2, int end1, int end2) {
  writeClipped(out1, read1, end1); // fwd read
  writeClipped(out2, read2, end2); // rev read
}
/* bool printResAdapt()
 * Control printing of reads minus adapters.
 * Return 1 if adapter found, else 0.
 */
bool printResAdapt(File out1, File out2, File dove,
    bool doveOpt, char* header, char** read1, char** read2,
    int len1, int len2, int pos, float best, bool gz,
    omp_lock_t* lock) {
  bool adapter = false;
  int end1 = len1; // number of chars of read1 to keep
  int end2 = len2; // number of chars of read2 to keep
  // if found, identify locations of adapters
  if (len1 > len2 + pos || pos < 0) {
    adapter = true;
    if (len1 > len2 + pos)
      end1 = len2 + pos; // clip read1's 3' overhang
    if (pos < 0)
      end2 += pos; // clip read2's 3' overhang
    if (doveOpt)
      printDove(dove, header, read1, read2,
        len1, len2, pos, lock + DOVE);
  }
  // print output (serialized via the OUT lock)
  omp_set_lock(lock + OUT);
  if (gz)
    printGZNoAdapt(out1.gzf, out2.gzf, read1, read2,
      end1, end2);
  else
    printNoAdapt(out1.f, out2.f, read1, read2,
      end1, end2);
  omp_unset_lock(lock + OUT);
  return adapter;
}
/* void printAln2()
 * Print details of stitch mismatches: one line for each
 * position in the overlapped region where the reads disagree
 * (or either has an 'N').
 */
void printAln2(File aln, char* header, char** read1,
    char** read2, int len1, int len2, int pos) {
  int i = pos; // index into read1
  int j = 0;   // index into read2 (revComp'd copy)
  if (pos < 0) {
    j = -pos;
    i = 0;
  }
  while (i < len1 && j < len2) {
    if (read1[SEQ][i] == 'N' || read2[SEQ + EXTRA + 1][j] == 'N'
        || read1[SEQ][i] != read2[SEQ + EXTRA + 1][j])
      // columns: name, position, R1 base, R1 qual, R2 base, R2 qual
      fprintf(aln.f, "%s\t%d\t%c\t%c\t%c\t%c\n",
        header + 1, i, read1[SEQ][i], read1[QUAL][i],
        read2[SEQ + EXTRA + 1][j], read2[QUAL + EXTRA][j]);
    i++;
    j++;
  }
}
/* void printAln()
 * Print nicely formatted alignment of stitched reads:
 * both sequences (offset by |pos| spaces), a match line
 * ('|' match, ':' N, ' ' mismatch), and both quality strings.
 */
void printAln(File aln, char* header, char** read1,
    char** read2, int len1, int len2, int pos) {
  fprintf(aln.f, "%s\n", header + 1);
  // print sequence alignment
  fprintf(aln.f, "seq_R1: ");
  for (int i = 0; i > pos; i--)
    fputc(' ', aln.f);
  fprintf(aln.f, "%s\n", read1[SEQ]);
  // print '|' for matches, ':' for Ns
  fprintf(aln.f, " ");
  int i;
  for (i = 0; i < abs(pos); i++)
    fputc(' ', aln.f);
  int j = 0;
  if (pos < 0) {
    j = -pos;
    i = 0;
  }
  while (i < len1 && j < len2) {
    fputc((read1[SEQ][i] == 'N' || read2[SEQ + EXTRA + 1][j] == 'N') ?
      ':' : (read1[SEQ][i] == read2[SEQ + EXTRA + 1][j] ?
      '|' : ' '), aln.f);
    i++;
    j++;
  }
  fputc('\n', aln.f);
  fprintf(aln.f, "seq_R2: ");
  for (int i = 0; i < pos; i++)
    fputc(' ', aln.f);
  fprintf(aln.f, "%s\n\n", read2[SEQ + EXTRA + 1]);
  // print quality scores
  fprintf(aln.f, "qual_R1: ");
  for (int i = 0; i > pos; i--)
    fputc(' ', aln.f);
  fprintf(aln.f, "%s\n", read1[QUAL]);
  fprintf(aln.f, "qual_R2: ");
  for (int i = 0; i < pos; i++)
    fputc(' ', aln.f);
  fprintf(aln.f, "%s\n\n", read2[QUAL + EXTRA]);
}
/* void createSeq()
 * Create stitched sequence in place (into seq1, qual1);
 * seq2/qual2 are the revComp'd/reversed copies of read 2.
 * Overlap quality scores come from empirical error profiles
 * (match/mism arrays), or the 'fastq-join' method.
 */
void createSeq(char* seq1, char* seq2, char* qual1,
    char* qual2, int len1, int len2, int pos,
    int offset, char** match, char** mism, bool fjoin) {
  int len = len2 + pos; // length of stitched sequence
  for (int i = 0; i < len; i++) {
    if (i - pos < 0) {
      // 1st read only: continue
      continue;
    } else if (i >= len1) {
      // 2nd read only: copy seq and qual
      seq1[i] = seq2[i-pos];
      qual1[i] = qual2[i-pos];
    } else if (seq2[i-pos] == 'N') {
      // 2nd read 'N': continue
      continue;
    } else if (seq1[i] == 'N') {
      // 1st read 'N': copy seq and qual
      seq1[i] = seq2[i-pos];
      qual1[i] = qual2[i-pos];
    } else if (seq1[i] != seq2[i-pos]) {
      // mismatch:
      // - base matches higher quality score or equal
      // quality score that is closer to 5' end
      // - quality score calculated as diff (fastq-join
      // method) or copied from mism array
      if (qual1[i] < qual2[i-pos] ||
          (qual1[i] == qual2[i-pos] && i >= len / 2.0) )
        seq1[i] = seq2[i-pos];
      if (fjoin)
        qual1[i] = abs(qual2[i-pos] - qual1[i]) + offset;
      else
        qual1[i] = mism[ (int) qual1[i] - offset ]
          [ (int) qual2[i-pos] - offset ] + offset;
    } else {
      // match:
      // - quality score calculated as max (fastq-join
      // method) or copied from match array
      if (fjoin) {
        if (qual1[i] < qual2[i-pos])
          qual1[i] = qual2[i-pos];
      } else
        qual1[i] = match[ (int) qual1[i] - offset ]
          [ (int) qual2[i-pos] - offset ] + offset;
    }
  }
  // terminate the merged strings
  seq1[len] = '\0';
  qual1[len] = '\0';
}
/* void printRes()
 * Print a successfully stitched read, plus any requested
 * logging (stitch log, dovetail log, formatted alignments).
 * All shared output streams are guarded by omp locks.
 */
void printRes(File out, File log, bool logOpt, File dove,
    bool doveOpt, File aln, int alnOpt, char* header,
    char** read1, char** read2, int len1, int len2,
    int pos, float best, int offset, bool gz, bool fjoin,
    char** match, char** mism, omp_lock_t* lock) {
  // log result
  if (logOpt) {
    omp_set_lock(lock + LOG);
    fprintf(log.f, "%s\t%d\t%d\t", header + 1,
      pos < 0 ? (len2+pos < len1 ? len2+pos : len1) :
      (len1-pos < len2 ? len1-pos : len2), len2 + pos);
    best ? fprintf(log.f, "%.3f", best) : fprintf(log.f, "0");
    fprintf(log.f, "\n");
    omp_unset_lock(lock + LOG);
  }
  if (doveOpt)
    printDove(dove, header, read1, read2, len1, len2,
      pos, lock + DOVE);
  // print formatted alignments
  if (alnOpt == 1) {
    omp_set_lock(lock + ALN);
    printAln(aln, header, read1, read2, len1, len2, pos);
    // create stitched sequence -- must come after printAln(),
    // since it overwrites read1's seq/qual in place
    createSeq(read1[SEQ], read2[SEQ + EXTRA + 1],
      read1[QUAL], read2[QUAL + EXTRA], len1, len2,
      pos, offset, match, mism, fjoin);
    // print merged seq to alignment output
    fprintf(aln.f, "merged\nseq: ");
    for (int i = 0; i > pos; i--)
      fputc(' ', aln.f);
    fprintf(aln.f, "%s\n", read1[SEQ]);
    fprintf(aln.f, "qual: ");
    for (int i = 0; i > pos; i--)
      fputc(' ', aln.f);
    fprintf(aln.f, "%s\n\n\n", read1[QUAL]);
    omp_unset_lock(lock + ALN);
  } else {
    // print stitch differences
    if (alnOpt == 2) {
      omp_set_lock(lock + ALN);
      printAln2(aln, header, read1, read2, len1, len2, pos);
      omp_unset_lock(lock + ALN);
    }
    // create stitched sequence
    createSeq(read1[SEQ], read2[SEQ + EXTRA + 1],
      read1[QUAL], read2[QUAL + EXTRA], len1, len2,
      pos, offset, match, mism, fjoin);
  }
  // print stitched sequence
  omp_set_lock(lock + OUT);
  if (gz)
    gzprintf(out.gzf, "%s\n%s\n+\n%s\n", header,
      read1[SEQ], read1[QUAL]);
  else
    fprintf(out.f, "%s\n%s\n+\n%s\n", header,
      read1[SEQ], read1[QUAL]);
  omp_unset_lock(lock + OUT);
}
/* void printFail()
 * Print reads that failed stitching: optionally log the
 * failure, and optionally write the original pair to the
 * unstitched-output files.
 */
void printFail(File un1, File un2, bool unOpt,
    File log, bool logOpt, char* header, char** read1,
    char** read2, bool gz, omp_lock_t* outLock,
    omp_lock_t* logLock) {
  if (logOpt) {
    omp_set_lock(logLock);
    fprintf(log.f, "%s\t%s\n", header + 1, NA);
    omp_unset_lock(logLock);
  }
  if (unOpt) {
    omp_set_lock(outLock);
    if (gz) {
      gzprintf(un1.gzf, "%s%s\n%s%s\n", read1[HEAD],
        read1[SEQ], read1[PLUS], read1[QUAL]);
      gzprintf(un2.gzf, "%s%s\n%s%s\n", read2[HEAD],
        read2[SEQ], read2[PLUS], read2[QUAL]);
    } else {
      fprintf(un1.f, "%s%s\n%s%s\n", read1[HEAD],
        read1[SEQ], read1[PLUS], read1[QUAL]);
      fprintf(un2.f, "%s%s\n%s%s\n", read2[HEAD],
        read2[SEQ], read2[PLUS], read2[QUAL]);
    }
    omp_unset_lock(outLock);
  }
}
/* int readFile()
 * Analyzes the reads in a set of input files.
 * Controls writing to the output file(s).
 * Multithreaded: input is serialized inside loadReads(),
 * outputs are guarded by per-stream omp locks.
 * Returns the fragment count; *stitch receives the number
 * stitched (or adapter-trimmed).
 */
int readFile(File in1, File in2, File out, File out2,
    File un1, File un2, bool unOpt, File log,
    bool logOpt, int overlap, bool dovetail, int doveOverlap,
    File dove, bool doveOpt, File aln, int alnOpt,
    bool adaptOpt, float mismatch, bool maxLen,
    int* stitch, int offset, int maxQual,
    bool gz1, bool gz2, bool gzOut, bool fjoin,
    char** match, char** mism, int threads) {
  // initialize omp locks -- out, un, log, dove, aln
  omp_lock_t lock[OMP_LOCKS];
  for (int i = 0; i < OMP_LOCKS; i++)
    omp_init_lock(&lock[i]);
  // process files in parallel
  int count = 0, stitchRed = 0;
  #pragma omp parallel num_threads(threads) reduction(+: count, stitchRed)
  {
    // allocate memory for both reads
    char** read1 = (char**) memalloc(FASTQ * sizeof(char*));
    char** read2 = (char**) memalloc((FASTQ + EXTRA) * sizeof(char*));
    for (int i = 0; i < FASTQ + EXTRA; i++) {
      if (i < FASTQ)
        read1[i] = (char*) memalloc(MAX_SIZE);
      // for 2nd read, save extra fields for revComp(seq) and rev(qual)
      read2[i] = (char*) memalloc(MAX_SIZE);
    }
    char* header = (char*) memalloc(MAX_SIZE); // consensus header
    // process reads
    int len1 = 0, len2 = 0; // lengths of reads
    while (loadReads(in1, in2, read1, read2, header,
        &len1, &len2, offset, maxQual, gz1, gz2)) {
      // find optimal overlap
      float best = 1.0f;
      int pos = findPos(read1[SEQ], read2[SEQ + EXTRA + 1],
        read1[QUAL], read2[QUAL + EXTRA], len1, len2, overlap,
        dovetail, doveOverlap, mismatch, maxLen, &best);
      // print result
      if (pos == len1 - overlap + 1) {
        // stitch failure
        if (adaptOpt)
          printFail(out, out2, 1, log, 0, header, read1,
            read2, gzOut, lock + OUT, lock + LOG);
        else
          printFail(un1, un2, unOpt, log, logOpt, header,
            read1, read2, gzOut, lock + UN, lock + LOG);
      } else {
        // stitch success
        if (adaptOpt) {
          stitchRed += printResAdapt(out, out2, dove, doveOpt,
            header, read1, read2, len1, len2, pos, best,
            gzOut, lock);
        } else {
          printRes(out, log, logOpt, dove, doveOpt, aln, alnOpt,
            header, read1, read2, len1, len2, pos, best, offset,
            gzOut, fjoin, match, mism, lock);
          stitchRed++;
        }
      }
      count++;
    }
    // free memory
    free(header);
    for (int i = 0; i < FASTQ + EXTRA; i++) {
      if (i < FASTQ)
        free(read1[i]);
      free(read2[i]);
    }
    free(read1);
    free(read2);
  } // END parallel
  // destroy omp locks -- use OMP_LOCKS (a hard-coded 5 here would
  // silently mismatch the init loop if the lock count ever changes)
  for (int i = 0; i < OMP_LOCKS; i++)
    omp_destroy_lock(&lock[i]);
  *stitch = stitchRed;
  return count;
}
/* void openWrite()
 * Open a file for writing (stdout if file is '-').
 * In gzip mode, a ".gz" extension is appended unless the name
 * already ends in ".gz" or is /dev/null.
 */
void openWrite(char* outFile, File* out, bool gz) {
  if (outFile[0] == '-' && strlen(outFile) > 1)
    exit(error(outFile, ERRNAME));
  if (gz) {
    size_t len = strlen(outFile);
    // check for an existing ".gz" extension -- only when the name is
    // long enough; otherwise outFile + len - strlen(GZEXT) would point
    // before the start of the string (undefined behavior)
    if ( (len >= strlen(GZEXT)
        && !strcmp(outFile + len - strlen(GZEXT), GZEXT))
        || !strcmp(outFile, "/dev/null"))
      out->gzf = gzopen(outFile, "w");
    else if (!strcmp(outFile, "-"))
      out->gzf = gzdopen(fileno(stdout), "wb");
    else {
      // add ".gz" to outFile
      char* outFile2 = memalloc(len + strlen(GZEXT) + 1);
      strcpy(outFile2, outFile);
      strcat(outFile2, GZEXT);
      out->gzf = gzopen(outFile2, "w");
      free(outFile2);
    }
    if (out->gzf == NULL)
      exit(error(outFile, ERROPENW));
  } else {
    out->f = (strcmp(outFile, "-") ?
      fopen(outFile, "w") : stdout);
    if (out->f == NULL)
      exit(error(outFile, ERROPENW));
  }
}
/* void openFiles()
 * Opens output files for the program,
 * adjusting file names/extensions as needed
 * (ONEEXT/TWOEXT suffixes for paired outputs).
 */
void openFiles(char* outFile, File* out, File* out2,
    char* unFile, File* un1, File* un2,
    char* logFile, File* log,
    char* doveFile, File* dove, bool dovetail,
    char* alnFile, File* aln,
    bool adaptOpt, bool gz, bool interOpt) {
  if (adaptOpt) {
    // adapter-removal mode: paired (or interleaved) outputs
    if (interOpt)
      openWrite(outFile, out, gz);
    else if (! strcmp(outFile, "-"))
      exit(error("stdout + \"_1.fastq\"", ERROPENW));
    else if (! strcmp(outFile, "/dev/null")) {
      openWrite(outFile, out, gz);
      openWrite(outFile, out2, gz);
    } else {
      // add "_1.fastq" and "_2.fastq" extensions
      int add = strlen(ONEEXT) > strlen(TWOEXT) ?
        strlen(ONEEXT) + 1 : strlen(TWOEXT) + 1;
      char* outFile2 = memalloc(strlen(outFile) + add);
      strcpy(outFile2, outFile);
      strcat(outFile2, ONEEXT);
      openWrite(outFile2, out, gz);
      strcpy(outFile2, outFile);
      strcat(outFile2, TWOEXT);
      openWrite(outFile2, out2, gz);
      free(outFile2);
    }
  } else {
    // stitch mode: single merged output
    openWrite(outFile, out, gz);
    // open optional files
    if (unFile != NULL) {
      if (interOpt)
        openWrite(unFile, un1, gz);
      else if (! strcmp(unFile, "-"))
        exit(error("stdout + \"_1.fastq\"", ERROPENW));
      else {
        // add "_1.fastq" and "_2.fastq" extensions
        int add = strlen(ONEEXT) > strlen(TWOEXT) ?
          strlen(ONEEXT) + 1 : strlen(TWOEXT) + 1;
        char* unFile2 = memalloc(strlen(unFile) + add);
        strcpy(unFile2, unFile);
        strcat(unFile2, ONEEXT);
        openWrite(unFile2, un1, gz);
        strcpy(unFile2, unFile);
        strcat(unFile2, TWOEXT);
        openWrite(unFile2, un2, gz);
        free(unFile2);
      }
    }
    if (logFile != NULL) {
      openWrite(logFile, log, false);
      fprintf(log->f, "Read\tOverlapLen\tStitchedLen\tMismatch\n");
    }
    if (alnFile != NULL)
      openWrite(alnFile, aln, false);
  }
  if (dovetail && doveFile != NULL) {
    openWrite(doveFile, dove, false);
    fprintf(dove->f, "Read\tAdapter_R1\tAdapter_R2\n");
  }
}
/* bool openRead()
 * Open a file for reading (stdin if file is '-').
 * Peeks at the first two bytes to detect gzip compression
 * (magic number 0x1F 0x8B). Return true if gzip compressed.
 */
bool openRead(char* inFile, File* in) {
  // open file or stdin
  bool stdinBool = (strcmp(inFile, "-") ? false : true);
  FILE* dummy = (stdinBool ? stdin : fopen(inFile, "r"));
  if (dummy == NULL)
    exit(error(inFile, ERROPEN));
  // check for gzip compression: magic number 0x1F, 0x8B
  bool gzip = true;
  int save = 0; // first char to pushback (for stdin)
  int i, j;
  for (i = 0; i < 2; i++) {
    j = fgetc(dummy);
    if (j == EOF)
      exit(error(inFile, ERROPEN));
    if ( (i && (unsigned char) j != 0x8B)
        || (! i && (unsigned char) j != 0x1F) ) {
      gzip = false;
      break;
    }
    if (! i)
      save = j;
  }
  // for stdin, push back chars (a pipe cannot be rewound)
  if (stdinBool) {
    if (gzip)
      exit(error("", ERRGZIP)); // gzip via stdin not supported
    if (ungetc(j, dummy) == EOF)
      exit(error("", ERRUNGET));
    if (i && ungetc(save, dummy) == EOF)
      exit(error("", ERRUNGET));
  }
  // open file
  if (gzip) {
    // reopen through zlib
    if (fclose(dummy))
      exit(error("", ERRCLOSE));
    in->gzf = gzopen(inFile, "r");
    if (in->gzf == NULL)
      exit(error(inFile, ERROPEN));
  } else {
    if (! stdinBool)
      rewind(dummy); // undo the two-byte peek
    in->f = dummy;
  }
  return gzip;
}
/* void loadQual()
 * Load quality score profiles from file: sections introduced
 * by "match"/"mismatch" header lines, each followed by
 * maxQual+1 rows of maxQual+1 comma-separated values, saved
 * into the pre-allocated match/mism arrays.
 */
void loadQual(char* qualFile, int maxQual,
    char*** match, char*** mism) {
  File qual;
  bool gz = openRead(qualFile, &qual);
  char* line = memalloc(MAX_SIZE);
  char** arr = NULL; // array to save to
  int i = 0, matIdx = 0, misIdx = 0; // array indices
  while (getLine(line, MAX_SIZE, qual, gz) != NULL) {
    if (line[0] == '#' || line[0] == '\n') {
      // determine target array
      i = 0;
      if (! strcmp(line + 1, "match\n"))
        arr = *match;
      else if (! strcmp(line + 1, "mismatch\n"))
        arr = *mism;
    } else if (arr == NULL) {
      continue; // skip lines outside a recognized section
    } else {
      // remove trailing '\n'
      int j;
      for (j = 0; line[j] != '\n' && line[j] != '\0'; j++) ;
      line[j] = '\0';
      // save values to array
      char* tok = strtok(line, CSV);
      for (j = 0; j < maxQual + 1; j++) {
        if (tok == NULL) {
          // fewer values on the row than expected
          char* msg = (char*) memalloc(MAX_SIZE);
          sprintf(msg, "(range [0, %d]) %s",
            maxQual, qualFile);
          exit(error(msg, ERRRANGE));
        }
        arr[i][j] = getInt(tok);
        tok = strtok(NULL, CSV);
      }
      i++;
      // stop filling once a full (maxQual+1)-row table is loaded
      if ( (arr == *match && ++matIdx > maxQual)
          || (arr == *mism && ++misIdx > maxQual) )
        arr = NULL;
    }
  }
  // make sure all values were loaded
  if (matIdx < maxQual + 1 || misIdx < maxQual + 1) {
    char* msg = (char*) memalloc(MAX_SIZE);
    sprintf(msg, "(range [0, %d]) %s", maxQual, qualFile);
    exit(error(msg, ERRRANGE));
  }
  if ( (gz && gzclose(qual.gzf) != Z_OK) ||
      (! gz && fclose(qual.f) ) )
    exit(error("", ERRCLOSE));
  free(line);
}
/* void saveQual()
 * Allocate and populate quality score profiles: copy the
 * built-in const tables, or (if qualFile is given) load
 * them from file via loadQual().
 */
void saveQual(char* qualFile, int maxQual,
    char*** match, char*** mism) {
  // allocate memory: (maxQual+1) x (maxQual+1) tables
  *match = (char**) memalloc((maxQual + 1) * sizeof(char*));
  *mism = (char**) memalloc((maxQual + 1) * sizeof(char*));
  for (int i = 0; i < maxQual + 1; i++) {
    (*match)[ i ] = (char*) memalloc(maxQual + 1);
    (*mism)[ i ] = (char*) memalloc(maxQual + 1);
  }
  if (qualFile == NULL) {
    // copy quality profile from const arrays
    if (maxQual > MAXQUAL)
      exit(error("", ERRDEFQ)); // built-in tables only go to MAXQUAL
    for (int i = 0; i < maxQual + 1; i++)
      for (int j = 0; j < maxQual + 1; j++) {
        (*match)[ i ][ j ] = match_profile[ i ][ j ];
        (*mism)[ i ][ j ] = mismatch_profile[ i ][ j ];
      }
  } else
    // load from file
    loadQual(qualFile, maxQual, match, mism);
}
/* void runProgram()
 * Controls the opening/closing of files,
 * and analysis by readFile().
 * Input file arguments may be comma-separated lists;
 * each pair of files is processed in turn.
 */
void runProgram(char* outFile, char* inFile1,
    char* inFile2, bool inter, char* unFile,
    char* logFile, int overlap, bool dovetail,
    char* doveFile, int doveOverlap, char* alnFile,
    int alnOpt, bool adaptOpt, int gzOut, bool fjoin,
    bool interOpt, float mismatch, bool maxLen,
    int offset, int maxQual, char* qualFile,
    bool verbose, int threads) {
  // get first set of input file names
  char* end1, *end2;
  char* file1 = strtok_r(inFile1, COM, &end1);
  char* file2 = file1;
  if (! inter)
    file2 = strtok_r(inFile2, COM, &end2);
  // loop through input files
  File out, out2, un1, un2, log, dove, aln; // output files
  char** match = NULL, **mism = NULL; // quality score profiles
  int i = 0; // count of files processed
  int tCount = 0, tStitch = 0; // counting variables
  while (file1 && file2) {
    // open input files
    File in1, in2;
    bool gz1 = openRead(file1, &in1);
    bool gz2 = gz1;
    if (! inter)
      gz2 = openRead(file2, &in2);
    // on first iteration, load quals and open outputs
    if (! i) {
      // load quality score profile
      if (! fjoin && ! adaptOpt)
        saveQual(qualFile, maxQual, &match, &mism);
      // open output files: gzip outputs if any input is gzipped,
      // unless explicitly disabled (gzOut == -1)
      if (gzOut == -1)
        gzOut = 0;
      else if (gz1 || gz2)
        gzOut = 1;
      openFiles(outFile, &out, &out2,
        unFile, &un1, &un2, logFile, &log,
        doveFile, &dove, dovetail, alnFile, &aln,
        adaptOpt, gzOut, interOpt);
    }
    // process files
    if (verbose)
      fprintf(stderr, "Processing files: %s,%s\n", file1,
        inter ? "(interleaved)" : file2);
    int stitch = 0; // counting variable
    int count = readFile(in1, inter ? in1 : in2,
      out, interOpt ? out : out2,
      un1, interOpt ? un1 : un2, unFile != NULL,
      log, logFile != NULL,
      overlap, dovetail, doveOverlap, dove,
      dovetail && doveFile != NULL, aln, alnOpt,
      adaptOpt, mismatch, maxLen, &stitch,
      offset, maxQual, gz1, gz2, gzOut, fjoin,
      match, mism, threads);
    tCount += count;
    tStitch += stitch;
    // log counts
    if (verbose) {
      fprintf(stderr, " Fragments (pairs of reads) analyzed: %d\n", count);
      if (adaptOpt)
        fprintf(stderr, " Adapters removed: %d\n", stitch);
      else
        fprintf(stderr, " Successfully stitched: %d\n", stitch);
    }
    // close input files
    if ( (gz1 && gzclose(in1.gzf) != Z_OK) || (! gz1 && fclose(in1.f))
        || (! inter && ( (gz2 && gzclose(in2.gzf) != Z_OK)
        || (! gz2 && fclose(in2.f)) ) ) )
      exit(error("", ERRCLOSE));
    // advance to the next pair of input files
    file1 = strtok_r(NULL, COM, &end1);
    file2 = file1;
    if (! inter)
      file2 = strtok_r(NULL, COM, &end2);
    i++;
  }
  // totals (only printed when multiple file sets were processed)
  if (verbose && i > 1) {
    fprintf(stderr, "Total counts\n");
    fprintf(stderr, " Fragments (pairs of reads) analyzed: %d\n", tCount);
    if (adaptOpt)
      fprintf(stderr, " Adapters removed: %d\n", tStitch);
    else
      fprintf(stderr, " Successfully stitched: %d\n", tStitch);
  }
  // free memory for qual score profiles
  if (! fjoin && ! adaptOpt) {
    for (int i = 0; i < maxQual + 1; i++) {
      free(match[i]);
      free(mism[i]);
    }
    free(match);
    free(mism);
  }
  // close files
  if ( ( gzOut && ( gzclose(out.gzf) != Z_OK ||
      (adaptOpt && ! interOpt && gzclose(out2.gzf) != Z_OK) ||
      (unFile != NULL && (gzclose(un1.gzf) != Z_OK ||
      (! interOpt && gzclose(un2.gzf) != Z_OK)) ) ) ) ||
      ( ! gzOut && ( fclose(out.f) ||
      (adaptOpt && ! interOpt && fclose(out2.f)) ||
      (unFile != NULL && (fclose(un1.f) ||
      (! interOpt && fclose(un2.f)) ) ) ) ) ||
      (logFile != NULL && fclose(log.f)) ||
      (dovetail && doveFile != NULL && fclose(dove.f)) ||
      (alnFile != NULL && fclose(aln.f)) )
    exit(error("", ERRCLOSE));
}
/* void getArgs()
 * Parse the command-line. Check for errors.
 * On success, hands all options to runProgram().
 */
void getArgs(int argc, char** argv) {
  // default parameters/filenames
  char* outFile = NULL, *inFile1 = NULL, *inFile2 = NULL,
    *unFile = NULL, *logFile = NULL, *doveFile = NULL,
    *alnFile = NULL, *qualFile = NULL;
  int overlap = DEFOVER, doveOverlap = DEFDOVE, gzOut = 0,
    offset = OFFSET, maxQual = MAXQUAL, threads = DEFTHR;
  float mismatch = DEFMISM;
  bool dovetail = false, adaptOpt = false, maxLen = true,
    diffOpt = false, interOpt = false, fjoin = false,
    verbose = false;
  // parse argv
  int c;
  while ( (c = getopt_long(argc, argv, OPTIONS, long_options, NULL)) != -1 )
    switch (c) {
      case HELP: usage(0); break;
      case VERSOPT: printVersion(); break;
      case MAXOPT: maxLen = false; break;
      case DOVEOPT: dovetail = true; break;
      case ADAPTOPT: adaptOpt = true; break;
      case GZOPT: gzOut = 1; break;
      case UNGZOPT: gzOut = -1; break;
      case DIFFOPT: diffOpt = true; break;
      case INTEROPT: interOpt = true; break;
      case FJOINOPT: fjoin = true; break;
      case VERBOSE: verbose = true; break;
      case OUTFILE: outFile = optarg; break;
      case FIRST: inFile1 = optarg; break;
      case SECOND: inFile2 = optarg; break;
      case UNFILE: unFile = optarg; break;
      case LOGFILE: logFile = optarg; break;
      case DOVEFILE: doveFile = optarg; break;
      case ALNFILE: alnFile = optarg; break;
      case OVERLAP: overlap = getInt(optarg); break;
      case DOVEOVER: doveOverlap = getInt(optarg); break;
      case MISMATCH: mismatch = getFloat(optarg); break;
      case QUALITY: offset = getInt(optarg); break;
      case SETQUAL: maxQual = getInt(optarg); break;
      case QUALFILE: qualFile = optarg; break;
      case THREADS: threads = getInt(optarg); break;
      default: exit(-1);
    }
  if (optind < argc)
    exit(error(argv[optind], ERRPARAM));
  // check for argument errors
  if (outFile == NULL || inFile1 == NULL) {
    error("", ERRFILE);
    usage(-1);
  }
  bool inter = false; // interleaved input
  if (inFile2 == NULL) {
    if (verbose)
      fprintf(stderr, "Warning: only one input file specified -- assuming interleaved\n");
    inter = true;
  }
  if (qualFile != NULL)
    fjoin = false; // given qualFile takes precedence over fastq-join method
  if (overlap <= 0 || doveOverlap <= 0)
    exit(error("", ERROVER));
  if (mismatch < 0.0f || mismatch >= 1.0f)
    exit(error("", ERRMISM));
  if (threads < 1)
    exit(error("", ERRTHREAD));
  // adjust parameters for adapter-removal mode
  if (adaptOpt) {
    dovetail = true;
    unFile = logFile = alnFile = qualFile = NULL;
  }
  int alnOpt = (alnFile != NULL ? (diffOpt ? 2 : 1) : 0);
  // send arguments to runProgram()
  runProgram(outFile, inFile1, inFile2, inter, unFile,
    logFile, overlap, dovetail, doveFile, doveOverlap,
    alnFile, alnOpt, adaptOpt, gzOut, fjoin, interOpt,
    mismatch, maxLen, offset, maxQual, qualFile, verbose,
    threads);
}
/* int main()
 * Main.
 */
int main(int argc, char* argv[]) {
// getArgs() parses/validates the command line and calls runProgram()
// itself; it exits directly (via exit()/usage()) on any error
getArgs(argc, argv);
return 0;
}
|
contact.c | #include <stdio.h>
#include <math.h>
// Squared Euclidean distance between two length-3 vectors a and b.
// NB: 'static' is required here: a bare C99 'inline' definition with no
// extern declaration in any translation unit provides no external
// definition, so a call the compiler chooses not to inline fails to link.
static inline double sqeuclidean3(const double a[], const double b[]) {
    return (a[0] - b[0])*(a[0] - b[0]) + (a[1] - b[1])*(a[1] - b[1]) + (a[2] - b[2])*(a[2] - b[2]);
}
// For each of the num_contacts rows of `contacts` (a pair of atom
// indices), compute the Euclidean distance between the two atoms, for
// every frame of the trajectory.
//   xyzlist - traj_length x num_atoms x 3 coordinates
//   results - traj_length x num_contacts output distances
void atomic_contact(const double *xyzlist, const int *contacts, int num_contacts,
                    int traj_length, int num_atoms, double *results) {
    int i;
#pragma omp parallel for default(none) shared(results, xyzlist, contacts, num_contacts, num_atoms, traj_length)
    for (i = 0; i < traj_length; i++) {
        const double *frame = xyzlist + num_atoms * 3 * i;
        double *out = results + num_contacts * i;
        for (int j = 0; j < num_contacts; j++) {
            // coordinates of the two atoms of this pair
            const double *p = frame + contacts[2*j] * 3;
            const double *q = frame + contacts[2*j + 1] * 3;
            out[j] = sqrt(sqeuclidean3(p, q));
        }
    }
}
// For each of the num_contacts rows of `contacts` (a pair of atom
// indices), compute both the scalar distance (results_dr) and the
// 3-vector displacement atom2 - atom1 (results_dx), for every frame.
//   results_dr - traj_length x num_contacts
//   results_dx - traj_length x num_contacts x 3
void atomic_displacement(const double *xyzlist, const int *contacts, int num_contacts,
                         int traj_length, int num_atoms, double *results_dr, double *results_dx) {
    int i;
#pragma omp parallel for default(none) shared(results_dr, results_dx, xyzlist, contacts, num_contacts, num_atoms, traj_length)
    for (i = 0; i < traj_length; i++) {
        const double *frame = xyzlist + num_atoms * 3 * i;
        double *dr = results_dr + num_contacts * i;
        double *dx = results_dx + num_contacts * i * 3;
        for (int j = 0; j < num_contacts; j++) {
            // coordinates of the two atoms of this pair
            const double *p = frame + contacts[2*j] * 3;
            const double *q = frame + contacts[2*j + 1] * 3;
            dr[j] = sqrt(sqeuclidean3(p, q));
            for (int k = 0; k < 3; k++)
                dx[3*j + k] = q[k] - p[k];
        }
    }
}
// Closest-atom contact: for each row of `contacts` (a pair of RESIDUE
// indices), report the minimum interatomic distance over all atom pairs
// drawn from the two residues, for every frame.
//
//   xyzlist           - traj_length x num_atoms x 3 coordinates
//   residues          - num_residues x residue_width atom indices; only the
//                       first atoms_per_residue[r] entries of row r are
//                       meaningful (the rest is padding)
//   atoms_per_residue - num_residues x 1
//   contacts          - num_contacts x 2 residue-index pairs
//   results           - traj_length x num_contacts output distances
void closest_contact(const double *xyzlist, const int *residues,
                     const int num_residues, const int residue_width,
                     const int* atoms_per_residue,
                     const int *contacts, int num_contacts, int traj_length,
                     int num_atoms, double *results) {
    int i;
#pragma omp parallel for default(none) shared(results, xyzlist, contacts, num_contacts, num_atoms, traj_length, residues, atoms_per_residue)
    for (i = 0; i < traj_length; i++) {
        const double *frame = xyzlist + num_atoms * 3 * i;
        const int *contact_ptr = contacts;
        double *results_ptr = results + num_contacts * i;
        for (int j = 0; j < num_contacts; j++, contact_ptr += 2, results_ptr++) {
            // atom-index rows for the two residues of this contact
            const int *atoms0 = residues + contact_ptr[0] * residue_width;
            const int *atoms1 = residues + contact_ptr[1] * residue_width;
            int max_k = atoms_per_residue[contact_ptr[0]];
            int max_l = atoms_per_residue[contact_ptr[1]];
            /* Track the minimum SQUARED distance and take sqrt once at the
               end. Fix: seed with HUGE_VAL instead of the previous magic
               1000000, which silently capped the reported distance at 1000
               whenever every candidate pair was farther apart than that. */
            double min = HUGE_VAL;
            for (int k = 0; k < max_k; k++) {
                const double *atom0 = frame + atoms0[k] * 3;
                for (int l = 0; l < max_l; l++) {
                    const double *atom1 = frame + atoms1[l] * 3;
                    double curr = sqeuclidean3(atom0, atom1);
                    if (curr < min) min = curr;
                }
            }
            *results_ptr = sqrt(min);
        }
    }
}
|
nco_lmt.c | /* $Header$ */
/* Purpose: Hyperslab limits */
/* Copyright (C) 1995--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License with exceptions described in the LICENSE file */
#include "nco_lmt.h" /* Hyperslab limits */
void
nco_lmt_init /* [fnc] Initialize limit to NULL/invalid values */
(lmt_sct *lmt) /* I/O [sct] Limit structure to initialize */
{
/* Purpose: Reset every member of a limit structure to its sentinel
   "unset" value so later code can tell which fields were populated:
   string members -> NULL, numeric members -> -1, flags -> False.
   NB: members are overwritten, not freed; do not pass a structure
   holding live allocations (use nco_lmt_free() for that) */
lmt->nm=NULL; /* [sng] Dimension name */
lmt->nm_fll=NULL; /* [sng] Full dimension name */
lmt->grp_nm_fll=NULL; /* [sng] Full group where dimension is defined */
lmt->ssc_sng=NULL; /* [sng] User-specified string for dimension subcycle */
lmt->max_sng=NULL; /* [sng] User-specified string for dimension maximum */
lmt->min_sng=NULL; /* [sng] User-specified string for dimension minimum */
lmt->ilv_sng=NULL; /* [sng] User-specified string for interleave stride */
lmt->rbs_sng=NULL; /* [sng] Used by ncra, ncrcat to re-base record coordinate (holds unit attribute from first file) */
lmt->srd_sng=NULL; /* [sng] User-specified string for dimension stride */
lmt->max_val=-1; /* [nbr] Double precision representation of maximum value of coordinate requested or implied */
lmt->min_val=-1; /* [nbr] Double precision representation of minimum value of coordinate requested or implied */
lmt->origin=-1; /* [nbr] Used by ncra, ncrcat to re-base record coordinate */
lmt->id=-1; /* [ID] Dimension ID */
lmt->lmt_typ=-1; /* [enm] Limit type (0, Coordinate value limit, 1, Dimension index limit, 2, UDUnits string ) */
lmt->cnt=-1; /* [nbr] Valid elements in this dimension (including effects of stride and wrapping) */
lmt->ssc=-1; /* [nbr] Subcycle of hyperslab */
lmt->ilv=-1; /* [nbr] Interleave stride */
lmt->end=-1; /* [nbr] Index to end of hyperslab */
lmt->max_idx=-1; /* [nbr] Index of maximum requested value in dimension */
lmt->min_idx=-1; /* [nbr] Index of minimum requested value in dimension */
lmt->rec_dmn_sz=-1; /* [nbr] Number of records in this file (multi-file record dimension only) */
lmt->rec_in_cml=-1; /* [nbr] Cumulative number of records in all files opened so far (multi-file record dimension only) */
lmt->idx_end_max_abs=-1; /* [nbr] Maximum allowed index in record dimension (multi-file record dimension only) */
lmt->rec_skp_ntl_spf=-1; /* [nbr] Records skipped in initial superfluous files (multi-file record dimension only) */
lmt->rec_skp_vld_prv=-1; /* [nbr] Records skipped since previous good one (multi-file record dimension only) */
lmt->rec_rmn_prv_ssc=-1; /* [nbr] Records remaining-to-be-read to complete subcycle group from previous file (multi-file record dimension only) */
lmt->srd=-1; /* [nbr] Stride of hyperslab */
lmt->srt=-1; /* [nbr] Index to start of hyperslab */
lmt->flg_ilv=False; /* [flg] True for interleaved output (used by ncra only) */
lmt->flg_mro=False; /* [flg] True for multi-record output (used by ncra only) */
lmt->flg_mso=False; /* [flg] True for multi-subcycle output (used by ncra only) */
lmt->flg_input_complete=False;/* [flg] True for multi-file operators when no more files need be opened */
lmt->is_rec_dmn=False; /* [flg] True if record dimension, else False */
lmt->is_usr_spc_lmt=False; /* [flg] True if any part of limit is user-specified, else False */
lmt->is_usr_spc_max=False; /* [flg] True if user-specified, else False */
lmt->is_usr_spc_min=False; /* [flg] True if user-specified, else False */
lmt->cln_typ=cln_nil; /* [enm] Used by ncra, ncrcat to store enum of calendar-type attribute */
} /* end nco_lmt_init() */
void
nco_lmt_prn /* [fnc] Print a Limit structure */
(lmt_sct *lmt) /* I/O [sct] Limit structure to print */
{
  /* Purpose: Dump every member of a limit structure to stdout for debugging
     Fix: string members are legitimately NULL until set (see nco_lmt_init());
     passing NULL as a printf() "%s" argument is undefined behavior, so print
     the literal "NULL" instead, as sibling nco_prn_lmt() already does */
  (void)fprintf(stdout,"Name: %s\n",lmt->nm == NULL ? "NULL" : lmt->nm);
  (void)fprintf(stdout,"User-specified string for dimension subcycle: %s\n",lmt->ssc_sng == NULL ? "NULL" : lmt->ssc_sng);
  (void)fprintf(stdout,"User-specified string for dimension maximum: %s\n",lmt->max_sng == NULL ? "NULL" : lmt->max_sng);
  (void)fprintf(stdout,"User-specified string for dimension minimum: %s\n",lmt->min_sng == NULL ? "NULL" : lmt->min_sng);
  (void)fprintf(stdout,"User-specified string for interleave stride: %s\n",lmt->ilv_sng == NULL ? "NULL" : lmt->ilv_sng);
  (void)fprintf(stdout,"Unit attribute from first file: %s\n",lmt->rbs_sng == NULL ? "NULL" : lmt->rbs_sng);
  (void)fprintf(stdout,"User-specified string for dimension stride: %s\n",lmt->srd_sng == NULL ? "NULL" : lmt->srd_sng);
  (void)fprintf(stdout,"Maximum value of coordinate: %f\n",lmt->max_val);
  (void)fprintf(stdout,"Minimum value of coordinate: %f\n",lmt->min_val);
  (void)fprintf(stdout,"Origin: %f\n",lmt->origin);
  (void)fprintf(stdout,"ID: %d\n",lmt->id);
  (void)fprintf(stdout,"Limit type: %d\n",lmt->lmt_typ);
  (void)fprintf(stdout,"Valid elements (i.e., count): %li\n",lmt->cnt);
  (void)fprintf(stdout,"Index of hyperslab start: %li\n",lmt->srt);
  (void)fprintf(stdout,"Index of hyperslab end: %li\n",lmt->end);
  (void)fprintf(stdout,"Hyperslab stride: %li\n",lmt->srd);
  (void)fprintf(stdout,"Subcycle length: %li\n",lmt->ssc);
  (void)fprintf(stdout,"Interleave stride: %li\n",lmt->ilv);
  (void)fprintf(stdout,"Index of maximum requested value: %li\n",lmt->max_idx);
  (void)fprintf(stdout,"Index of minimum requested value: %li\n",lmt->min_idx);
  (void)fprintf(stdout,"Number of records in this file: %li\n",lmt->rec_dmn_sz);
  (void)fprintf(stdout,"Cumulative number of records in all files: %li\n",lmt->rec_in_cml);
  (void)fprintf(stdout,"Maximum allowed index in record dimension: %li\n",lmt->idx_end_max_abs);
  (void)fprintf(stdout,"Records skipped in initial superfluous files: %li\n",lmt->rec_skp_ntl_spf);
  (void)fprintf(stdout,"Records skipped since previous good one: %li\n",lmt->rec_skp_vld_prv);
  (void)fprintf(stdout,"Records remaining-to-be-read in current group: %li\n",lmt->rec_rmn_prv_ssc);
  (void)fprintf(stdout,"Is multi-record output: %d\n",lmt->flg_mro);
  (void)fprintf(stdout,"Is multi-subcycle output: %d\n",lmt->flg_mso);
  (void)fprintf(stdout,"No more files need be opened: %d\n",lmt->flg_input_complete);
  (void)fprintf(stdout,"Is record dimension: %d\n",lmt->is_rec_dmn);
  (void)fprintf(stdout,"Any part is user-specified: %d\n",lmt->is_usr_spc_lmt);
  (void)fprintf(stdout,"Is user-specified maximum: %d\n",lmt->is_usr_spc_max);
  (void)fprintf(stdout,"Is user-specified minimum: %d\n",lmt->is_usr_spc_min);
  (void)fprintf(stdout,"Calendar-type attribute: %d\n",lmt->cln_typ);
} /* end nco_lmt_prn() */
void
nco_lmt_cpy /* [fnc] Deep-copy a Limit structure */
(const lmt_sct * const lmt1, /* I [sct] Limit structure to copy */
lmt_sct *lmt2) /* O [sct] New limit structure (must be alloced before) */
{
  /* Purpose: Deep-copy lmt1 into lmt2. String members are duplicated with
     strdup() so the two structures share no storage; scalar and flag
     members are copied by plain assignment. lmt2 must already be
     allocated; any members it held are overwritten, not freed. */
  assert(lmt1->nm);
  /* Wipe destination to NULL/invalid before filling it in */
  (void)nco_lmt_init(lmt2);
  /* Duplicate string members (strdup() already returns char *) */
  lmt2->nm=strdup(lmt1->nm);
  if(lmt1->nm_fll) lmt2->nm_fll=strdup(lmt1->nm_fll);
  if(lmt1->grp_nm_fll) lmt2->grp_nm_fll=strdup(lmt1->grp_nm_fll);
  if(lmt1->max_sng) lmt2->max_sng=strdup(lmt1->max_sng);
  if(lmt1->min_sng) lmt2->min_sng=strdup(lmt1->min_sng);
  if(lmt1->ssc_sng) lmt2->ssc_sng=strdup(lmt1->ssc_sng);
  if(lmt1->ilv_sng) lmt2->ilv_sng=strdup(lmt1->ilv_sng);
  if(lmt1->rbs_sng) lmt2->rbs_sng=strdup(lmt1->rbs_sng);
  if(lmt1->srd_sng) lmt2->srd_sng=strdup(lmt1->srd_sng);
  /* Copy scalar members */
  lmt2->max_val=lmt1->max_val;
  lmt2->min_val=lmt1->min_val;
  lmt2->origin=lmt1->origin;
  lmt2->id=lmt1->id;
  lmt2->lmt_typ=lmt1->lmt_typ;
  lmt2->cnt=lmt1->cnt;
  lmt2->ssc=lmt1->ssc;
  lmt2->ilv=lmt1->ilv;
  lmt2->end=lmt1->end;
  lmt2->max_idx=lmt1->max_idx;
  lmt2->min_idx=lmt1->min_idx;
  lmt2->rec_dmn_sz=lmt1->rec_dmn_sz;
  lmt2->rec_in_cml=lmt1->rec_in_cml;
  lmt2->idx_end_max_abs=lmt1->idx_end_max_abs;
  lmt2->rec_skp_ntl_spf=lmt1->rec_skp_ntl_spf;
  lmt2->rec_skp_vld_prv=lmt1->rec_skp_vld_prv;
  lmt2->rec_rmn_prv_ssc=lmt1->rec_rmn_prv_ssc;
  lmt2->srd=lmt1->srd;
  lmt2->srt=lmt1->srt;
  /* Copy flag members */
  lmt2->flg_ilv=lmt1->flg_ilv;
  lmt2->flg_mro=lmt1->flg_mro;
  lmt2->flg_mso=lmt1->flg_mso;
  lmt2->flg_input_complete=lmt1->flg_input_complete;
  lmt2->is_rec_dmn=lmt1->is_rec_dmn;
  lmt2->is_usr_spc_lmt=lmt1->is_usr_spc_lmt;
  lmt2->is_usr_spc_max=lmt1->is_usr_spc_max;
  lmt2->is_usr_spc_min=lmt1->is_usr_spc_min;
  lmt2->cln_typ=lmt1->cln_typ;
} /* end nco_lmt_cpy() */
lmt_sct * /* O [sct] Pointer to free'd structure */
nco_lmt_free /* [fnc] Free memory associated with limit structure */
(lmt_sct *lmt) /* I/O [sct] Limit structure to free */
{
/* Threads: Routine is thread safe and calls no unsafe routines */
/* Purpose: Free all memory associated with dynamically allocated limit structure */
/* Free each string member first, then the structure itself; nco_free()
   returns NULL so each member is nulled as it is released.
   Always returns NULL -- callers re-assign: lmt=nco_lmt_free(lmt); */
lmt->nm=(char *)nco_free(lmt->nm);
lmt->nm_fll=(char *)nco_free(lmt->nm_fll);
lmt->grp_nm_fll=(char *)nco_free(lmt->grp_nm_fll);
lmt->ilv_sng=(char *)nco_free(lmt->ilv_sng);
lmt->max_sng=(char *)nco_free(lmt->max_sng);
lmt->min_sng=(char *)nco_free(lmt->min_sng);
lmt->srd_sng=(char *)nco_free(lmt->srd_sng);
lmt->ssc_sng=(char *)nco_free(lmt->ssc_sng);
lmt->rbs_sng=(char *)nco_free(lmt->rbs_sng);
lmt=(lmt_sct *)nco_free(lmt);
return lmt;
} /* end nco_lmt_free() */
lmt_sct ** /* O [sct] Pointer to free'd structure list */
nco_lmt_lst_free /* [fnc] Free memory associated with limit structure list */
(lmt_sct **lmt_lst, /* I/O [sct] Limit structure list to free */
const int lmt_nbr) /* I [nbr] Number of limit structures in list */
{
/* Threads: Routine is thread safe and calls no unsafe routines */
/* Purpose: Free all memory associated with dynamically allocated limit structure list */
int idx;
/* Free each element (nco_lmt_free() returns NULL) ... */
for(idx=0;idx<lmt_nbr;idx++) lmt_lst[idx]=nco_lmt_free(lmt_lst[idx]);
/* Free structure pointer last */
lmt_lst=(lmt_sct **)nco_free(lmt_lst);
return lmt_lst;
} /* end nco_lmt_lst_free() */
lmt_msa_sct * /* O [sct] Pointer to free'd structure */
nco_lmt_all_free /* [fnc] Free memory associated with limit structure */
(lmt_msa_sct *lmt_all) /* I/O [sct] Limit structure to free */
{
/* Threads: Routine is thread safe and calls no unsafe routines */
/* Purpose: Free all memory associated with dynamically allocated lmt_all structure */
lmt_all->dmn_nm=(char *)nco_free(lmt_all->dmn_nm);
/* NB: lmt_dmn[idx] are free'd by nco_lmt_lst_free() in calling routine */
lmt_all->lmt_dmn=(lmt_sct **)nco_free(lmt_all->lmt_dmn);
lmt_all=(lmt_msa_sct *)nco_free(lmt_all);
/* Always NULL; callers re-assign their pointer with the return value */
return lmt_all;
} /* end nco_lmt_all_free() */
lmt_msa_sct ** /* O [sct] Pointer to free'd structure list */
nco_lmt_all_lst_free /* [fnc] Free memory associated with lmt_all structure list */
(lmt_msa_sct **lmt_all_lst, /* I/O [sct] Limit structure list to free */
const int lmt_all_nbr) /* I [nbr] Number of limit structures in list */
{
/* Threads: Routine is thread safe and calls no unsafe routines */
/* Purpose: Free all memory associated with dynamically allocated lmt_msa_sct structure list */
int idx;
/* Free each element (nco_lmt_all_free() returns NULL) ... */
for(idx=0;idx<lmt_all_nbr;idx++) lmt_all_lst[idx]=nco_lmt_all_free(lmt_all_lst[idx]);
/* Free structure pointer last */
lmt_all_lst=(lmt_msa_sct **)nco_free(lmt_all_lst);
return lmt_all_lst;
} /* end nco_lmt_all_lst_free() */
lmt_sct * /* [sct] Limit structure for dimension */
nco_lmt_sct_mk /* [fnc] Create stand-alone limit structure for given dimension */
(const int nc_id, /* I [idx] netCDF file ID */
const int dmn_id, /* I [idx] ID of dimension for this limit structure */
CST_X_PTR_CST_PTR_CST_Y(lmt_sct,lmt), /* I [sct] Array of limit structures from nco_lmt_evl() */
int lmt_nbr, /* I [nbr] Number of limit structures */
const nco_bool FORTRAN_IDX_CNV) /* I [flg] Hyperslab indices obey Fortran convention */
{
  /* Purpose: Create stand-alone limit structure just for given dimension
     ncra.c calls nco_lmt_sct_mk() to generate record dimension limit structure
     This is a complex routine fundamental to most of NCO
     It is easy to make subtle errors when changing it
     Please ask CSZ to review any significant patches to this routine
     Returns NULL on error (fix: previously returned False, which is not a
     pointer type, and leaked the freshly allocated structure)
     NB: only the string and is_usr_spc_* members are populated here; the
     remaining members are presumably filled in later by nco_lmt_evl() --
     TODO(review) confirm no caller reads them before that */
  int idx;
  int rcd; /* [rcd] Return code */
  lmt_sct *lmt_dim;
  lmt_dim=(lmt_sct *)nco_malloc(sizeof(lmt_sct));
  /* Initialize defaults to False, override later if warranted */
  lmt_dim->is_usr_spc_lmt=False; /* True if any part of limit is user-specified, else False */
  lmt_dim->is_usr_spc_max=False; /* True if user-specified, else False */
  lmt_dim->is_usr_spc_min=False; /* True if user-specified, else False */
  /* rec_skp_ntl_spf, rec_skp_vld_prv, rec_in_cml, and rec_rmn_prv_ssc only used for MFO record dimension */
  lmt_dim->rec_skp_ntl_spf=0L; /* Number of records skipped in initial superfluous files */
  lmt_dim->rec_skp_vld_prv=0L; /* Number of records skipped since previous good one */
  lmt_dim->rec_in_cml=0L; /* Number of records, read or not, in previously processed files */
  lmt_dim->rec_rmn_prv_ssc=0L; /* Records remaining-to-be-read to complete subcycle group from previous file */
  for(idx=0;idx<lmt_nbr;idx++){
    /* Copy user-specified limits, if any */
    if(lmt[idx]->id == dmn_id){
      lmt_dim->is_usr_spc_lmt=True; /* True if any part of limit is user-specified, else False */
      if(lmt[idx]->max_sng == NULL){
        lmt_dim->max_sng=NULL;
      }else{
        lmt_dim->max_sng=(char *)strdup(lmt[idx]->max_sng);
        lmt_dim->is_usr_spc_max=True; /* True if user-specified, else False */
      } /* end if */
      if(lmt[idx]->min_sng == NULL){
        lmt_dim->min_sng=NULL;
      }else{
        lmt_dim->min_sng=(char *)strdup(lmt[idx]->min_sng);
        lmt_dim->is_usr_spc_min=True; /* True if user-specified, else False */
      } /* end if */
      if(lmt[idx]->srd_sng) lmt_dim->srd_sng=(char *)strdup(lmt[idx]->srd_sng); else lmt_dim->srd_sng=NULL;
      if(lmt[idx]->ssc_sng) lmt_dim->ssc_sng=(char *)strdup(lmt[idx]->ssc_sng); else lmt_dim->ssc_sng=NULL;
      if(lmt[idx]->ilv_sng) lmt_dim->ilv_sng=(char *)strdup(lmt[idx]->ilv_sng); else lmt_dim->ilv_sng=NULL;
      lmt_dim->nm=(char *)strdup(lmt[idx]->nm);
      break;
    } /* end if */
  } /* end loop over idx */
  /* If this limit was not user-specified, then ... */
  if(idx == lmt_nbr){
    /* Create default limits to look as though user-specified them */
    char dmn_nm[NC_MAX_NAME];
    long cnt;
    int max_sng_sz;
    /* Fill-in limits with default parsing information */
    rcd=nco_inq_dim_flg(nc_id,dmn_id,dmn_nm,&cnt);
    if(rcd == NC_EBADDIM){
      (void)fprintf(stdout,"%s: ERROR attempting to find non-existent dimension with ID = %d in nco_lmt_sct_mk()\n",nco_prg_nm_get(),dmn_id);
      /* Fix: free structure before returning NULL (was: leaked lmt_dim and returned False) */
      lmt_dim=(lmt_sct *)nco_free(lmt_dim);
      return NULL;
    } /* end if */
    lmt_dim->nm=(char *)strdup(dmn_nm);
    lmt_dim->srd_sng=NULL;
    lmt_dim->ssc_sng=NULL;
    lmt_dim->ilv_sng=NULL;
    /* Generate min and max strings to look as if user had specified them
       Adjust accordingly if FORTRAN_IDX_CNV was requested for other dimensions
       These sizes will later be decremented in nco_lmt_evl() where all information
       is converted internally to C-based indexing representation.
       Ultimately this problem arises because I want nco_lmt_evl() to think the
       user always did specify this dimension's hyperslab.
       Otherwise, problems arise when FORTRAN_IDX_CNV is specified by the user
       along with explicit hyperslabs for some dimensions excluding the record
       dimension.
       Then, when nco_lmt_sct_mk() creates the record dimension structure, it must
       be created consistently with the FORTRAN_IDX_CNV flag for the other dimensions.
       In order to do that, fill-in max_sng, min_sng, and srd_sng
       arguments with strings as if they had been read from keyboard.
       An alternate solution is to add flag to lmt_sct indicating whether this
       limit struct had been automatically generated and then act accordingly. */
    /* Decrement cnt to C-index value if necessary */
    if(!FORTRAN_IDX_CNV) cnt--;
    if(cnt < 0L){
      if(cnt == -1L) (void)fprintf(stdout,"%s: ERROR nco_lmt_sct_mk() reports record variable exists and is size zero, i.e., has no records yet.\n",nco_prg_nm_get());
      (void)fprintf(stdout,"%s: HINT: Perform record-oriented operations only after file has valid records.\n",nco_prg_nm_get());
      (void)fprintf(stdout,"%s: cnt < 0 in nco_lmt_sct_mk()\n",nco_prg_nm_get());
      /* Fix: release members allocated so far, then structure (was: leaked both and returned False) */
      lmt_dim->nm=(char *)nco_free(lmt_dim->nm);
      lmt_dim=(lmt_sct *)nco_free(lmt_dim);
      return NULL;
    } /* end if */
    /* cnt < 10 covers negative numbers and SIGFPE from log10(cnt==0)
       Adding 1 is required for cnt=10,100,1000... */
    if(cnt < 10L) max_sng_sz=1; else max_sng_sz=1+(int)ceil(log10((double)cnt));
    /* Add one for NUL terminator */
    lmt_dim->max_sng=(char *)nco_malloc(sizeof(char)*(max_sng_sz+1));
    (void)sprintf(lmt_dim->max_sng,"%ld",cnt);
    if(FORTRAN_IDX_CNV){
      lmt_dim->min_sng=(char *)strdup("1");
    }else{
      lmt_dim->min_sng=(char *)strdup("0");
    } /* end else */
  } /* end if user did not explicity specify limits for this dimension */
  return lmt_dim;
} /* end nco_lmt_sct_mk() */
lmt_sct ** /* O [sct] Structure list with user-specified strings for min and max limits */
nco_lmt_prs /* [fnc] Create limit structures with name, min_sng, max_sng elements */
(const int lmt_nbr, /* I [nbr] number of dimensions with limits */
CST_X_PTR_CST_PTR_CST_Y(char,lmt_arg)) /* I [sng] List of user-specified dimension limits */
{
/* Purpose: Set name, min_sng, max_sng elements of comma separated list of names and ranges.
Routine merely evaluates syntax of input expressions and does validate dimensions or
ranges against those present in input netCDF file. */
/* Valid syntax adheres to nm,[min_sng][,[max_sng][,[srd_sng][,[ssc_sng]]]] */
/* Returns a freshly allocated list of lmt_nbr limit structures (NULL when
   lmt_nbr <= 0); exits the process on any syntax error. Caller frees the
   list and the strings it owns with nco_lmt_lst_free() */
char **arg_lst;
char *msg_sng=NULL_CEWI; /* [sng] Error message */
const char dlm_sng[]=",";
lmt_sct **lmt=NULL_CEWI;
int idx;
int arg_nbr;
nco_bool NCO_SYNTAX_ERROR=False; /* [flg] Syntax error in hyperslab specification */
if(lmt_nbr > 0) lmt=(lmt_sct **)nco_malloc(lmt_nbr*sizeof(lmt_sct *));
for(idx=0;idx<lmt_nbr;idx++){
/* Process hyperslab specifications as normal text list */
arg_lst=nco_lst_prs_2D(lmt_arg[idx],dlm_sng,&arg_nbr);
/* Check syntax */
if(arg_nbr < 2){ /* Need more than just dimension name */
msg_sng=strdup("Hyperslab options must specify at least two arguments (the first argument is the dimension name, the second is the minimum index, etc.)");
NCO_SYNTAX_ERROR=True;
}else if(arg_nbr > 6){ /* Too much information */
msg_sng=strdup("Too many (more than 6) arguments");
NCO_SYNTAX_ERROR=True;
}else if(arg_lst[0] == NULL){ /* Dimension name not specified */
msg_sng=strdup("Dimension name not specified");
NCO_SYNTAX_ERROR=True;
}else if(arg_nbr == 2 && arg_lst[1] == NULL){ /* No min specified */
msg_sng=strdup("Must specify minimum value");
NCO_SYNTAX_ERROR=True;
}else if(arg_nbr == 3 && arg_lst[1] == NULL && arg_lst[2] == NULL){ /* No min or max when stride not specified */
msg_sng=strdup("Must specify minimum and/or maximum value since stride is also empty");
NCO_SYNTAX_ERROR=True;
}else if(arg_nbr == 4 && arg_lst[3] == NULL){ /* Stride should be specified */
msg_sng=strdup("Stride must be specified (and be a positive integer)");
NCO_SYNTAX_ERROR=True;
}else if(arg_nbr == 5 && arg_lst[4] == NULL){ /* Subcycle should be specified */
msg_sng=strdup("Subcycle must be specified (and be a positive integer)");
NCO_SYNTAX_ERROR=True;
}else if(arg_nbr == 6 && arg_lst[5] == NULL){ /* Group-mode should be specified */
msg_sng=strdup("Group-mode must be specified (as 'm' or 'M')");
NCO_SYNTAX_ERROR=True;
} /* end else */
if(NCO_SYNTAX_ERROR){
(void)fprintf(stdout,"%s: ERROR parsing hyperslab specification for dimension %s\n%s\n%s: HINT Conform request to hyperslab documentation at http://nco.sf.net/nco.html#hyp\n",nco_prg_nm_get(),lmt_arg[idx],msg_sng,nco_prg_nm_get());
msg_sng=(char *)nco_free(msg_sng);
nco_exit(EXIT_FAILURE);
} /* !NCO_SYNTAX_ERROR */
/* Initialize structure */
/* lmt strings that are not explicitly set by user remain NULL, i.e.,
specifying default setting will appear as if nothing at all was set.
Hopefully, in routines that follow, branch followed when dimension has
all default settings specified (e.g.,"-d foo,,,,") yields same answer
as branch for which no hyperslab along that dimension was set. */
lmt[idx]=(lmt_sct *)nco_malloc(sizeof(lmt_sct));
/* Initialize to NULL/invalid */
(void)nco_lmt_init(lmt[idx]);
lmt[idx]->nm=NULL;
lmt[idx]->is_usr_spc_lmt=True; /* True if any part of limit is user-specified, else False */
lmt[idx]->min_sng=NULL;
lmt[idx]->max_sng=NULL;
lmt[idx]->srd_sng=NULL;
lmt[idx]->ssc_sng=NULL;
lmt[idx]->ilv_sng=NULL;
/* rec_skp_ntl_spf is used for record dimension in multi-file operators */
lmt[idx]->rec_skp_ntl_spf=0L; /* Number of records skipped in initial superfluous files */
/* Fill-in structure */
/* NB: ownership of arg_lst[0..5] strings transfers into the limit
   structure here; only the pointer array itself is freed below */
lmt[idx]->nm=arg_lst[0];
lmt[idx]->min_sng=arg_lst[1];
/* Setting min_sng and max_sng to same pointer would lead to dangerous double-free() condition */
if(arg_nbr <= 2) lmt[idx]->max_sng=(char *)strdup(arg_lst[1]);
if(arg_nbr > 2) lmt[idx]->max_sng=arg_lst[2];
if(arg_nbr > 3) lmt[idx]->srd_sng=arg_lst[3];
if(arg_nbr > 4) lmt[idx]->ssc_sng=arg_lst[4];
if(arg_nbr > 5) lmt[idx]->ilv_sng=arg_lst[5];
if(lmt[idx]->max_sng == NULL) lmt[idx]->is_usr_spc_max=False; else lmt[idx]->is_usr_spc_max=True;
if(lmt[idx]->min_sng == NULL) lmt[idx]->is_usr_spc_min=False; else lmt[idx]->is_usr_spc_min=True;
/* Initialize types used to re-base coordinate variables */
lmt[idx]->origin=0.0;
lmt[idx]->rbs_sng=NULL_CEWI;
lmt[idx]->cln_typ=cln_nil;
/* 20130903: Initialize cumulative number of records in all files opened so far (multi-file record dimension only) */
lmt[idx]->rec_in_cml=0L;
/* Free current pointer array to strings, leaving untouched the strings themselves
They will be free()'d with limit structures in nco_lmt_lst_free() */
arg_lst=(char **)nco_free(arg_lst);
} /* End loop over lmt structure list */
return lmt;
} /* end nco_lmt_prs() */
int /* O [enm] Limit type */
nco_lmt_typ /* [fnc] Determine limit type */
(char *sng) /* I [ptr] Pointer to limit string */
{
  /* Purpose: Classify a user-specified limit string as a UDUnits
     specification, a coordinate value, or a dimension index.
     Test order matters: units markers first, then coordinate-value
     forms, then date-like strings, with dimension index as fallback. */
  const char *dsh; /* Position of first dash, if any */

  /* Space delimits user-specified units, e.g., "3 meters" */
  if(strchr(sng,' ')) return lmt_udu_sng;
  /* Colon delimits user-specified units, e.g., '1918-11-11 11:00:0.0' */
  if(strchr(sng,':')) return lmt_udu_sng;
  /* Decimal point (very common so check early), e.g., "3.0" */
  if(strchr(sng,'.')) return lmt_crd_val;
  /* Non-decimal (non-UDUnits) coordinate value, e.g., "3e10" or "3d10" */
  if(strchr(sng,'E') || strchr(sng,'e') || strchr(sng,'D') || strchr(sng,'d')) return lmt_crd_val;
  /* Other date-like strings: a non-leading dash suggests yyyy-mm-dd
     (a leading dash would be a sign, not a date separator) */
  dsh=strchr(sng,'-');
  if(dsh && dsh != sng){
    int yyyy,mm,dd;
    /* Scan for yyyy-mm-dd */
    if(sscanf(sng,"%d-%d-%d",&yyyy,&mm,&dd) == 3) return lmt_udu_sng;
  } /* !date-like string */
  /* Default: Limit is dimension index */
  return lmt_dmn_idx;
} /* end nco_lmt_typ() */
char * /* O [sng] Units string */
nco_lmt_get_udu_att /* Returns specified attribute otherwise NULL */
(const int nc_id, /* I [idx] netCDF file ID */
const int var_id,
const char *att_nm) /* I [id] Variable ID whose attribute to read */
{
  /* Purpose: Read named text attribute of a variable from disk and return
     it as a freshly allocated NUL-terminated string.
     Returns NULL when the attribute is absent or is not of type NC_CHAR.
     Caller owns (and must free) the returned buffer. */
  nc_type att_typ;
  long att_sz;
  char *att_val=NULL_CEWI;

  /* Guard clauses: attribute must exist and be textual */
  if(nco_inq_att_flg(nc_id,var_id,att_nm,&att_typ,&att_sz) != NC_NOERR) return att_val;
  if(att_typ != NC_CHAR) return att_val;

  /* Room for attribute contents plus terminating NUL */
  att_val=(char *)nco_malloc((att_sz+1UL)*sizeof(char));
  /* Get 'units' attribute */
  (void)nco_get_att(nc_id,var_id,att_nm,att_val,att_typ);
  att_val[att_sz]='\0';
  return att_val;
} /* end nco_lmt_get_udu_att() */
void
nco_prn_lmt /* [fnc] Print limit information */
(lmt_sct lmt, /* I [sct] Limit structure */
int min_lmt_typ, /* I [nbr] Limit type */
nco_bool FORTRAN_IDX_CNV, /* I [flg] Hyperslab indices obey Fortran convention */
nco_bool flg_no_data_ok, /* I [flg] True if file contains no data for hyperslab */
long rec_usd_cml, /* I [nbr] Number of valid records already processed (only used for record dimensions in multi-file operators) */
monotonic_direction_enm monotonic_direction, /* I [enm] Monotonic_direction */
nco_bool rec_dmn_and_mfo, /* I [flg] True if record dimension in multi-file operator */
long cnt_rmn_ttl, /* I [nbr] Total records to be read from this and all remaining files */
long cnt_rmn_crr, /* I [nbr] Records to extract from current file */
long rec_skp_vld_prv_dgn) /* I [nbr] Records skipped at end of previous valid file, if any (diagnostic only) */
{
/* Purpose: Print limit information */
/* Diagnostic dump written to stderr by nco_lmt_evl(); string members are
   printed as "NULL" when unset, -1L sentinels suppress the optional
   record-count lines, MFO-only lines print only when rec_dmn_and_mfo */
(void)fprintf(stderr,"Dimension hyperslabber nco_lmt_evl() diagnostics:\n");
(void)fprintf(stderr,"Dimension name = %s\n",lmt.nm);
(void)fprintf(stderr,"Limit type is %s\n",((min_lmt_typ == lmt_crd_val) || (min_lmt_typ == lmt_udu_sng)) ? "coordinate value" : (FORTRAN_IDX_CNV) ? "one-based dimension index" : "zero-based dimension index");
(void)fprintf(stderr,"Limit %s user-specified\n",(lmt.is_usr_spc_lmt) ? "is" : "is not");
(void)fprintf(stderr,"Limit %s record dimension\n",(lmt.is_rec_dmn) ? "is" : "is not");
(void)fprintf(stderr,"Current file %s specified hyperslab, data %s be read\n",(flg_no_data_ok) ? "is superfluous to" : "is required by",(flg_no_data_ok) ? "will not" : "will");
(void)fprintf(stderr,"Cumulative number of records in all input files opened including this one = %li\n",lmt.rec_in_cml);
(void)fprintf(stderr,"Records skipped in initial superfluous files = %li\n",lmt.rec_skp_ntl_spf);
(void)fprintf(stderr,"Valid records read (and used) from previous files = %li\n",rec_usd_cml);
(void)fprintf(stderr,"Total records to be read from this and all following files = %li\n",cnt_rmn_ttl);
(void)fprintf(stderr,"Records to be read from this file = %li\n",cnt_rmn_crr);
(void)fprintf(stderr,"rec_skp_vld_prv_dgn (previous file, if any) = %li \n",rec_skp_vld_prv_dgn);
(void)fprintf(stderr,"rec_skp_vld_prv (this file) = %li \n",lmt.rec_skp_vld_prv);
(void)fprintf(stderr,"min_sng = %s\n",lmt.min_sng == NULL ? "NULL" : lmt.min_sng);
(void)fprintf(stderr,"max_sng = %s\n",lmt.max_sng == NULL ? "NULL" : lmt.max_sng);
(void)fprintf(stderr,"srd_sng = %s\n",lmt.srd_sng == NULL ? "NULL" : lmt.srd_sng);
(void)fprintf(stderr,"ssc_sng = %s\n",lmt.ssc_sng == NULL ? "NULL" : lmt.ssc_sng);
(void)fprintf(stderr,"ilv_sng = %s\n",lmt.ilv_sng == NULL ? "NULL" : lmt.ilv_sng);
(void)fprintf(stderr,"monotonic_direction = %s\n",(monotonic_direction == not_checked) ? "not checked" : (monotonic_direction == increasing) ? "increasing" : "decreasing");
(void)fprintf(stderr,"min_val = %g\n",lmt.min_val);
(void)fprintf(stderr,"max_val = %g\n",lmt.max_val);
(void)fprintf(stderr,"min_idx = %li\n",lmt.min_idx);
(void)fprintf(stderr,"max_idx = %li\n",lmt.max_idx);
(void)fprintf(stderr,"srt = %li\n",lmt.srt);
(void)fprintf(stderr,"end = %li\n",lmt.end);
(void)fprintf(stderr,"cnt = %li\n",lmt.cnt);
(void)fprintf(stderr,"srd = %li\n",lmt.srd);
(void)fprintf(stderr,"ssc = %li\n",lmt.ssc);
(void)fprintf(stderr,"ilv = %li\n",lmt.ilv);
(void)fprintf(stderr,"WRP = %s\n",lmt.srt > lmt.end ? "YES" : "NO");
(void)fprintf(stderr,"SRD = %s\n",lmt.srd != 1L ? "YES" : "NO");
(void)fprintf(stderr,"SSC = %s\n",lmt.ssc != 1L ? "YES" : "NO");
(void)fprintf(stderr,"MRO = %s\n",lmt.flg_mro ? "YES" : "NO");
(void)fprintf(stderr,"MSO = %s\n",lmt.flg_mso ? "YES" : "NO");
(void)fprintf(stderr,"ILV = %s\n\n",lmt.flg_ilv ? "YES" : "NO");
} /* nco_prn_lmt() */
void
nco_lmt_evl /* [fnc] Parse user-specified limits into hyperslab specifications */
(const int grp_id, /* I [idx] netCDF group ID */
lmt_sct *lmt_ptr, /* I/O [sct] Structure from nco_lmt_prs() or from nco_lmt_sct_mk() to hold dimension limit information */
long rec_usd_cml, /* I [nbr] Number of valid records already processed (only used for record dimensions in multi-file operators) */
nco_bool FORTRAN_IDX_CNV) /* I [flg] Hyperslab indices obey Fortran convention */
{
/* NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */
/* Purpose: Take parsed list of dimension names, minima, and
maxima strings and find appropriate indices into dimensions
for formulation of dimension start and count vectors, or fail trying. */
const char fnc_nm[]="nco_lmt_evl()";
char *fl_udu_sng=NULL_CEWI; /* Store units attribute of coordinate dimension */
char *msg_sng=NULL_CEWI; /* [sng] Error message */
char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */
nco_bool flg_no_data_err=False; /* True if domain brackets no data (and not an MFO/record coordinate) */
nco_bool flg_no_data_ok=False; /* True if file contains no data for hyperslab */
nco_bool rec_dmn_and_mfo=False; /* True if record dimension in multi-file operator */
nco_bool NCO_SYNTAX_ERROR=False; /* [flg] Syntax error in hyperslab specification */
dmn_sct dim;
lmt_sct lmt;
int min_lmt_typ=int_CEWI;
int max_lmt_typ=int_CEWI;
monotonic_direction_enm monotonic_direction=not_checked; /* CEWI */
int nco_prg_id; /* Program ID */
int rcd=NC_NOERR; /* [enm] Return code */
int rec_dmn_id; /* [idx] Variable ID of record dimension, if any */
int dmn_ids_ult[NC_MAX_DIMS]; /* [nbr] Unlimited dimensions IDs array */
int nbr_dmn_ult; /* [nbr] Number of unlimited dimensions */
int fl_fmt; /* [nbr] File format */
long dmn_sz;
long cnt_rmn_crr=-1L; /* Records to extract from current file */
long cnt_rmn_ttl=-1L; /* Total records to be read from this and all remaining files */
long rec_skp_vld_prv_dgn=-1L; /* Records skipped at end of previous valid file, if any (diagnostic only) */
lmt=*lmt_ptr;
nco_prg_id=nco_prg_id_get(); /* Program ID */
/* Initialize limit structure */
lmt.flg_mro=False;
lmt.flg_mso=False;
lmt.max_val=0.0;
lmt.min_val=0.0;
lmt.ssc=1L;
lmt.srd=1L;
lmt.ilv=1L;
lmt.flg_input_complete=False;
/* Get dimension ID from name */
rcd=nco_inq_dimid_flg(grp_id,lmt.nm,&lmt.id);
if(rcd != NC_NOERR){
(void)fprintf(stdout,"%s: ERROR dimension %s is not in input file\n",nco_prg_nm_get(),lmt.nm);
nco_exit(EXIT_FAILURE);
} /* endif */
/* Logic on whether to allow skipping current file depends on whether limit
is specified for record dimension in multi-file operators.
This information is not used in single-file operators, though whether
the limit is a record limit may be tested.
Program defensively and define this flag in all cases. */
(void)nco_inq_format(grp_id,&fl_fmt);
/* Obtain unlimited dimensions for group */
(void)nco_inq_unlimdims(grp_id,&nbr_dmn_ult,dmn_ids_ult);
rec_dmn_id=-1;
if(fl_fmt == NC_FORMAT_NETCDF4){
for(int idx_dmn=0;idx_dmn<nbr_dmn_ult;idx_dmn++)
/* Match IDs to get the ID of the record (both IDs from 'dmn_ids_ult' and 'lmt' are obtained here, and function is called on ncra file loop */
if(lmt.id == dmn_ids_ult[idx_dmn]) rec_dmn_id=dmn_ids_ult[idx_dmn];
}else{
rec_dmn_id=dmn_ids_ult[0];
} /* !netCDF4 */
if(lmt.id == rec_dmn_id) lmt.is_rec_dmn=True; else lmt.is_rec_dmn=False;
if(lmt.is_rec_dmn && (nco_prg_id == ncra || nco_prg_id == ncrcat)) rec_dmn_and_mfo=True; else rec_dmn_and_mfo=False;
/* Get dimension size */
(void)nco_inq_dimlen(grp_id,lmt.id,&dim.sz);
/* Shortcut to avoid indirection */
dmn_sz=dim.sz;
if(rec_dmn_and_mfo){
lmt.rec_dmn_sz=dmn_sz;
lmt.idx_end_max_abs=lmt.rec_in_cml+dmn_sz-1L; /* Maximum allowed index in record dimension */
} /* !rec_dmn_and_mfo */
/* Bomb if dmn_sz < 1 */
if(dmn_sz < 1L){
(void)fprintf(stdout,"%s: ERROR Size of dimension %s is %li in input file, but must be > 0 in order to apply limits.\n",nco_prg_nm_get(),lmt.nm,dmn_sz);
nco_exit(EXIT_FAILURE);
} /* end if */
if(lmt.srd_sng){
if(strchr(lmt.srd_sng,'.') || strchr(lmt.srd_sng,'e') || strchr(lmt.srd_sng,'E') || strchr(lmt.srd_sng,'d') || strchr(lmt.srd_sng,'D')){
(void)fprintf(stdout,"%s: ERROR Requested stride for %s, %s, must be integer\n",nco_prg_nm_get(),lmt.nm,lmt.srd_sng);
nco_exit(EXIT_FAILURE);
} /* end if */
lmt.srd=strtol(lmt.srd_sng,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.srd_sng,"strtol",sng_cnv_rcd);
if(lmt.srd < 1L){
(void)fprintf(stdout,"%s: ERROR Stride for %s is %li but must be > 0\n",nco_prg_nm_get(),lmt.nm,lmt.srd);
nco_exit(EXIT_FAILURE);
} /* end if */
} /* !lmt.srd_sng */
if(lmt.ssc_sng){
if(strchr(lmt.ssc_sng,'.') || strchr(lmt.ssc_sng,'e') || strchr(lmt.ssc_sng,'E') || strchr(lmt.ssc_sng,'d') || strchr(lmt.ssc_sng,'D')){
(void)fprintf(stdout,"%s: ERROR Requested subcycle argument for %s, %s, must be integer\n",nco_prg_nm_get(),lmt.nm,lmt.ssc_sng);
nco_exit(EXIT_FAILURE);
} /* end if */
lmt.ssc=strtol(lmt.ssc_sng,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.ssc_sng,"strtol",sng_cnv_rcd);
if(lmt.ssc < 1L){
(void)fprintf(stdout,"%s: ERROR Subcycle argument for %s is %li but must be > 0\n",nco_prg_nm_get(),lmt.nm,lmt.ssc);
nco_exit(EXIT_FAILURE);
} /* end if */
if(nco_prg_id != ncra && nco_prg_id != ncrcat){
(void)fprintf(stdout,"%s: ERROR Subcycle hyperslabs only implemented for ncra and ncrcat\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* end ncra */
} /* !lmt.ssc_sng */
if(lmt.ilv_sng){
if(strchr(lmt.ilv_sng,'.') || strchr(lmt.ilv_sng,'e') || strchr(lmt.ilv_sng,'E') || strchr(lmt.ilv_sng,'d') || strchr(lmt.ilv_sng,'D')){
(void)fprintf(stdout,"%s: ERROR Requested interleave stride argument for %s, %s, must be integer\n",nco_prg_nm_get(),lmt.nm,lmt.ilv_sng);
nco_exit(EXIT_FAILURE);
} /* end if */
lmt.ilv=strtol(lmt.ilv_sng,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.ilv_sng,"strtol",sng_cnv_rcd);
if(lmt.ilv < 1L){
(void)fprintf(stdout,"%s: ERROR Interleave stride argument for %s is %li but must be > 0\n",nco_prg_nm_get(),lmt.nm,lmt.ilv);
nco_exit(EXIT_FAILURE);
} /* end if */
if(nco_prg_id != ncra && nco_prg_id != ncrcat){
(void)fprintf(stdout,"%s: ERROR Interleave stride hyperslabs only implemented for ncra and ncrcat\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* end ncra */
if(lmt.ilv > 1L){
lmt.flg_ilv=True;
if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: quark1 lmt.ilv_sng = %s, lmt.ilv = %ld, flg_ilv = %s\n",nco_prg_nm_get(),lmt.ilv_sng == NULL ? "NULL" : lmt.ilv_sng,lmt.ilv,lmt.flg_ilv ? "YES" : "NO");
} /* !lmt.ilv */
} /* !lmt.ilv_sng */
/* In case flg_mro is set in ncra.c by --mro */
if(lmt.flg_mro){
if(nco_prg_id == ncrcat){
(void)fprintf(stdout,"%s: INFO Specifying Multi-Record Output (MRO) option (--mro) is redundant. MRO is always true for ncrcat.\n",nco_prg_nm_get());
}else if(nco_prg_id != ncra){
(void)fprintf(stdout,"%s: ERROR Multi-Record Output (MRO) option (--mro) is only valid for ncra.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* end else */
} /* !lmt.flg_mro */
/* In case flg_mso is set in ncra.c by --mso */
if(lmt.flg_mso){
if(nco_prg_id == ncrcat){
(void)fprintf(stdout,"%s: INFO Specifying Multi-Subcycle Output (MSO) option (--mso) is redundant. MSO is always true for ncrcat.\n",nco_prg_nm_get());
}else if(nco_prg_id != ncra){
(void)fprintf(stdout,"%s: ERROR Multi-Subcycle Output (MSO) option (--mso) is only valid for ncra.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* end else */
} /* !lmt.flg_mso */
/* 20200721 Context-sensitive argument inferral makes default (blank) arguments more useful
Order and mutual-exclusivity of these conditions is important */
if(lmt.ilv_sng && !lmt.ssc_sng && !lmt.srd_sng){
lmt.ssc=lmt.ilv;
lmt.srd=lmt.ssc;
}else if(lmt.ilv_sng && !lmt.ssc_sng){
lmt.ssc=lmt.ilv;
}else if(lmt.ilv_sng && !lmt.srd_sng){
lmt.srd=lmt.ssc;
}else if(lmt.ssc_sng && !lmt.srd_sng){
lmt.srd=lmt.ssc;
} /* lmt.ilv */
/* Set MRO whenever interleave is explicitly requested */
if(lmt.ilv_sng) lmt.flg_mro=True;
/* Set MSO whenever interleave is explicitly requested */
if(lmt.ilv_sng) lmt.flg_mso=True;
/* If min_sng and max_sng are both NULL then set type to lmt_dmn_idx */
if(lmt.min_sng == NULL && lmt.max_sng == NULL){
/* Limiting indices will be set to default extrema a bit later */
min_lmt_typ=max_lmt_typ=lmt_dmn_idx;
}else{
/* min_sng and max_sng are not both NULL */
/* Limit is coordinate value if string contains decimal point or is in exponential format
Otherwise limit is interpreted as zero-based dimension offset */
if(lmt.min_sng) min_lmt_typ=nco_lmt_typ(lmt.min_sng);
if(lmt.max_sng) max_lmt_typ=nco_lmt_typ(lmt.max_sng);
/* Copy lmt_typ from defined limit to undefined */
if(!lmt.min_sng) min_lmt_typ=max_lmt_typ;
if(!lmt.max_sng) max_lmt_typ=min_lmt_typ;
} /* end else */
/* Both min_lmt_typ and max_lmt_typ are now defined
Continue only if both limits are of the same type */
if(min_lmt_typ != max_lmt_typ){
(void)fprintf(stdout,"%s: ERROR -d %s,%s,%s\n",nco_prg_nm_get(),lmt.nm,lmt.min_sng,lmt.max_sng);
(void)fprintf(stdout,"Limits on dimension \"%s\" must be of same numeric type:\n",lmt.nm);
(void)fprintf(stdout,"\"%s\" was interpreted as a %s.\n",lmt.min_sng,((min_lmt_typ == lmt_crd_val) || (min_lmt_typ == lmt_udu_sng)) ? "coordinate value" : (FORTRAN_IDX_CNV) ? "one-based dimension index" : "zero-based dimension index");
(void)fprintf(stdout,"\"%s\" was interpreted as a %s.\n",lmt.max_sng,((max_lmt_typ == lmt_crd_val) || (max_lmt_typ == lmt_udu_sng)) ? "coordinate value" : (FORTRAN_IDX_CNV) ? "one-based dimension index" : "zero-based dimension index");
(void)fprintf(stdout,"(Limit arguments containing a decimal point (or in exponential format) are interpreted as coordinate values; arguments without a decimal point are interpreted as zero-based or one-based (depending on -F switch) dimensional indices.)\n");
nco_exit(EXIT_FAILURE);
} /* end if */
lmt.lmt_typ=min_lmt_typ;
/* Coordinate re-basing code */
lmt.origin=0.0;
/* Get variable ID of coordinate */
rcd=nco_inq_varid_flg(grp_id,lmt.nm,&dim.cid);
if(rcd == NC_NOERR){
char *cln_sng=NULL_CEWI;
fl_udu_sng=nco_lmt_get_udu_att(grp_id,dim.cid,"units"); /* Units attribute of coordinate variable */
cln_sng=nco_lmt_get_udu_att(grp_id,dim.cid,"calendar"); /* Calendar attribute */
if(rec_dmn_and_mfo && fl_udu_sng && lmt.rbs_sng){
#ifdef ENABLE_UDUNITS
/* Re-base and reset origin to 0.0 if re-basing fails
if(nco_cln_clc_org(fl_udu_sng,lmt.rbs_sng,lmt.cln_typ,&lmt.origin) != NCO_NOERR) lmt.origin=0.0;
*/
#endif /* !ENABLE_UDUNITS */
} /* endif */
/* ncra and ncrcat read "calendar" attribute in main()
Avoid multiple reads of calendar attribute in multi-file operations */
if(!rec_dmn_and_mfo){
if(cln_sng) lmt.cln_typ=nco_cln_get_cln_typ(cln_sng); else lmt.cln_typ=cln_nil;
} /* endif */
if(cln_sng) cln_sng=(char *)nco_free(cln_sng);
} /* end if limit is coordinate */
if((lmt.lmt_typ == lmt_crd_val) || (lmt.lmt_typ == lmt_udu_sng)){
double *dmn_val_dp;
double dmn_max;
double dmn_min;
long max_idx;
long min_idx;
long tmp_idx;
long dmn_srt=0L;
/* Get coordinate type */
(void)nco_inq_vartype(grp_id,dim.cid,&dim.type);
/* Warn when coordinate type is weird */
if(dim.type == NC_BYTE || dim.type == NC_UBYTE || dim.type == NC_CHAR || dim.type == NC_STRING) (void)fprintf(stderr,"\n%s: WARNING Coordinate %s is type %s. Dimension truncation is unpredictable.\n",nco_prg_nm_get(),lmt.nm,nco_typ_sng(dim.type));
/* if(lmt.ssc != 1L) (void)fprintf(stderr,"\n%s: WARNING Hyperslabs for %s are based on coordinate values rather than dimension indices. The behavior of the subcycle hyperslab argument is ill-defined, unpredictable, and unsupported for coordinate-based hyperslabs. Only min, max, and stride are supported for coordinate-value based hyperslabbing. Subcycle may or may not work as you intend. Use at your own risk.\n",nco_prg_nm_get(),lmt.nm); */
/* Allocate enough space to hold coordinate */
dmn_val_dp=(double *)nco_malloc(dmn_sz*nco_typ_lng(NC_DOUBLE));
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
{ /* begin OpenMP critical */
/* Block is critical for identical in_id's
Block is thread-safe for distinct in_id's */
/* 20110221: replace nco_get_vara() with nc_get_vara_double() */
/* Retrieve this coordinate */
nc_get_vara_double(grp_id,dim.cid,(const size_t *)&dmn_srt,(const size_t *)&dmn_sz,dmn_val_dp);
} /* end OpenMP critical */
/* Officially change type */
dim.type=NC_DOUBLE;
/* Assuming coordinate is monotonic, direction of monotonicity is determined by first two elements */
if(dmn_sz == 1L){
monotonic_direction=increasing;
}else{
if(dmn_val_dp[0] > dmn_val_dp[1]) monotonic_direction=decreasing; else monotonic_direction=increasing;
} /* end else */
if(monotonic_direction == increasing){
min_idx=0L;
max_idx=dmn_sz-1L;
}else{
min_idx=dmn_sz-1L;
max_idx=0L;
} /* end else */
/* Determine min and max values of entire coordinate */
dmn_min=dmn_val_dp[min_idx];
dmn_max=dmn_val_dp[max_idx];
/* Set defaults */
lmt.min_val=dmn_val_dp[min_idx];
lmt.max_val=dmn_val_dp[max_idx];
/* Convert UDUnits strings if necessary */
/* If we are here then either min_sng or max_sng or both are set */
if(lmt.lmt_typ == lmt_udu_sng){
if(!fl_udu_sng){
(void)fprintf(stdout,"%s: ERROR attempting to read units attribute from variable \"%s\" \n",nco_prg_nm_get(),dim.nm);
nco_exit(EXIT_FAILURE);
} /* end if */
if(lmt.min_sng)
if(nco_cln_clc_dbl_org(lmt.min_sng,fl_udu_sng,lmt.cln_typ,&lmt.min_val) != NCO_NOERR)
nco_exit(EXIT_FAILURE);
if(lmt.max_sng)
if(nco_cln_clc_dbl_org(lmt.max_sng,fl_udu_sng,lmt.cln_typ,&lmt.max_val) != NCO_NOERR)
nco_exit(EXIT_FAILURE);
}else{ /* end UDUnits conversion */
/* Convert user-specified limits into double precision numeric values, or supply defaults */
if(lmt.min_sng){
lmt.min_val=strtod(lmt.min_sng,&sng_cnv_rcd);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.min_sng,"strtod",sng_cnv_rcd);
} /* !lmt.min_sng */
if(lmt.max_sng){
lmt.max_val=strtod(lmt.max_sng,&sng_cnv_rcd);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.max_sng,"strtod",sng_cnv_rcd);
} /* !lmt.max_sng */
/* Re-base coordinates as necessary in multi-file operators (MFOs)
lmt.origin was calculated earlier in routine */
if(rec_dmn_and_mfo && fl_udu_sng && lmt.rbs_sng && strcmp(fl_udu_sng,lmt.rbs_sng)){
if(lmt.min_sng)
if(nco_cln_clc_dbl_var_dff(lmt.rbs_sng,fl_udu_sng,lmt.cln_typ,&lmt.min_val,(var_sct *)NULL) != NCO_NOERR)
nco_exit(EXIT_FAILURE);
if(lmt.max_sng)
if(nco_cln_clc_dbl_var_dff(lmt.rbs_sng,fl_udu_sng,lmt.cln_typ,&lmt.max_val,(var_sct *)NULL) != NCO_NOERR)
nco_exit(EXIT_FAILURE);
if(nco_dbg_lvl_get() > nco_dbg_std) fprintf(stdout,"%s: INFO nco_lmt rebasing min_val=%f max_val=%f\n",nco_prg_nm_get(),lmt.min_val,lmt.max_val);
} /* endif MFO */
} /* end UDUnits conversion */
/* Warn when min_val > max_val (i.e., wrapped coordinate) */
if(nco_dbg_lvl_get() > nco_dbg_std && lmt.min_val > lmt.max_val) (void)fprintf(stderr,"%s: INFO Interpreting hyperslab specifications as wrapped coordinates [%s <= %g] and [%s >= %g]\n",nco_prg_nm_get(),lmt.nm,lmt.max_val,lmt.nm,lmt.min_val);
/* Fail when... */
if(
/* Following condition added 20000508, changes behavior of single point
hyperslabs depending on whether hyperslab occurs in record dimension
during multi-file operator operation.
Altered behavior of single point hyperslabs so that single point
hyperslabs in the record coordinate (i.e., -d time,1.0,1.0) may be
treated differently than single point hyperslabs in other
coordinates. Multifile operators will skip files if single point
hyperslabs in record coordinate lay outside record coordinate
range of file. For non-record coordinates (and for all operators
besides ncra and ncrcat on record coordinates), single point
hyperslabs will choose the closest value rather than skip the file
(I believe). This should be verified. */
/* User specified single point, coordinate is not wrapped, and both extrema fall outside valid crd range */
(rec_dmn_and_mfo && (lmt.min_val == lmt.max_val) && ((lmt.min_val > dmn_max) || (lmt.max_val < dmn_min))) ||
/* User did not specify single point, coordinate is not wrapped, and either extrema falls outside valid crd range */
((lmt.min_val < lmt.max_val) && ((lmt.min_val > dmn_max) || (lmt.max_val < dmn_min))) ||
/* User did not specify single point, coordinate is wrapped, and both extrema fall outside valid crd range */
((lmt.min_val > lmt.max_val) && ((lmt.min_val > dmn_max) && (lmt.max_val < dmn_min))) ||
False){
/* Allow for possibility that current file is superfluous */
if(rec_dmn_and_mfo){
flg_no_data_ok=True;
goto no_data_ok;
}else{
(void)fprintf(stdout,"%s: ERROR User-specified coordinate value range %g <= %s <= %g does not fall within valid coordinate range %g <= %s <= %g\n",nco_prg_nm_get(),lmt.min_val,lmt.nm,lmt.max_val,dmn_min,lmt.nm,dmn_max);
nco_exit(EXIT_FAILURE);
} /* end else */
} /* end if */
/* Armed with target coordinate minima and maxima, we are ready to bracket user-specified range */
/* If min_sng or max_sng were omitted, use extrema */
if(lmt.min_sng == NULL) lmt.min_idx=min_idx;
if(lmt.max_sng == NULL) lmt.max_idx=max_idx;
/* Single slice requires finding the closest coordinate */
if(lmt.min_val == lmt.max_val){
double dst_new;
double dst_old;
lmt.min_idx=0L;
dst_old=fabs(lmt.min_val-dmn_val_dp[0]);
for(tmp_idx=1L;tmp_idx<dmn_sz;tmp_idx++){
if((dst_new=fabs(lmt.min_val-dmn_val_dp[tmp_idx])) < dst_old){
dst_old=dst_new;
lmt.min_idx=tmp_idx;
} /* end if */
} /* end loop over tmp_idx */
lmt.max_idx=lmt.min_idx;
}else{ /* min_val != max_val */
/* Bracket specified extrema:
Should no coordinate values match the given criteria, flag the index with -1L
We defined the valid syntax such that single half range with -1L is not an error
This causes "-d lon,100.0,-100.0" to select [-180.0] when lon=[-180.0,-90.0,0.0,90.0]
because one of the specified half-ranges is valid (there are coordinates < -100.0).
However, "-d lon,100.0,-200.0" should fail when lon=[-180.0,-90.0,0.0,90.0] because both
of the specified half-ranges are invalid (no coordinate is > 100.0 or < -200.0).
-1L flags are replaced with correct indices (0L or dmn_sz-1L) following search loop block.
Overwriting -1L flags with 0L or dmn_sz-1L later is more heuristic than setting them = 0L here,
since 0L is valid search result. */
if(monotonic_direction == increasing){
if(lmt.min_sng){
/* Find index of smallest coordinate greater than min_val */
tmp_idx=0L;
while((dmn_val_dp[tmp_idx] < lmt.min_val) && (tmp_idx < dmn_sz)) tmp_idx++;
if(tmp_idx != dmn_sz) lmt.min_idx=tmp_idx; else lmt.min_idx=-1L;
} /* end if */
if(lmt.max_sng){
/* Find index of largest coordinate less than max_val */
tmp_idx=dmn_sz-1L;
while((dmn_val_dp[tmp_idx] > lmt.max_val) && (tmp_idx > -1L)) tmp_idx--;
if(tmp_idx != -1L) lmt.max_idx=tmp_idx; else lmt.max_idx=-1L;
} /* end if */
/* 20110221: csz fix hyperslab bug TODO nco1007 triggered by
ncks -O -v lat -d lat,20.,20.001 ~/nco/data/in.nc ~/foo.nc
This returned all values but should have returned none
Algorithm was broken because, although valid min and max indices existed,
they contained the empty set.
Now when this happens, set flg_no_data_err block */
if( /* Points are not wrapped ... */
(lmt.min_val < lmt.max_val) &&
/* ... and valid indices were found for both bracketing points... */
(lmt.min_idx != -1L && lmt.max_idx != -1L) &&
/* ...and indices contain empty set, i.e., min_idx > max_idx for increasing data... */
lmt.min_idx > lmt.max_idx) flg_no_data_err=True;
/* end if monotonic_direction == increasing */
}else{ /* monotonic_direction == decreasing */
if(lmt.min_sng){
/* Find index of smallest coordinate greater than min_val */
tmp_idx=dmn_sz-1L;
while((dmn_val_dp[tmp_idx] < lmt.min_val) && (tmp_idx > -1L)) tmp_idx--;
if(tmp_idx != -1L) lmt.min_idx=tmp_idx; else lmt.min_idx=-1L;
} /* end if */
if(lmt.max_sng){
/* Find index of largest coordinate less than max_val */
tmp_idx=0L;
while((dmn_val_dp[tmp_idx] > lmt.max_val) && (tmp_idx < dmn_sz)) tmp_idx++;
if(tmp_idx != dmn_sz) lmt.max_idx=tmp_idx; else lmt.max_idx=-1L;
} /* end if */
if( /* Points are not wrapped ... */
(lmt.min_val > lmt.max_val) &&
/* ... and valid indices were found for both bracketing points... */
(lmt.min_idx != -1L && lmt.max_idx != -1L) &&
/* ...and indices contain empty set, i.e., min_idx < max_idx for decreasing data... */
lmt.min_idx < lmt.max_idx) flg_no_data_err=True;
} /* end else monotonic_direction == decreasing */
/* Case where both min_idx and max_idx = -1 was flagged as error above
Case of wrapped coordinate: Either, but not both, of min_idx or max_idx will be flagged with -1
See explanation above */
if(lmt.min_idx == -1L && (lmt.min_val > lmt.max_val)) lmt.min_idx=0L;
if(lmt.max_idx == -1L && (lmt.min_val > lmt.max_val)) lmt.max_idx=dmn_sz-1L;
} /* end if min_val != max_val */
/* User-specified ranges are now bracketed */
/* Convert indices of minima and maxima to srt and end indices */
if(monotonic_direction == increasing){
lmt.srt=lmt.min_idx;
lmt.end=lmt.max_idx;
}else{
lmt.srt=lmt.max_idx;
lmt.end=lmt.min_idx;
} /* end else */
/* Free space allocated for dimension */
dmn_val_dp=(double*)nco_free(dmn_val_dp);
if(rec_dmn_and_mfo){
/* No wrapping with multi-file operators */
if((monotonic_direction == increasing && lmt.min_val > lmt.max_val) ||
(monotonic_direction == decreasing && lmt.min_val < lmt.max_val)){
flg_no_data_ok=True;
goto no_data_ok;
} /* endif */
if(rec_usd_cml == 0L){
/* Skipped records remains zero until valid records are processed */
lmt.rec_skp_vld_prv=0L;
}else if(rec_usd_cml > 0L){
/* Otherwise, adjust starting index by records skipped in jumps across file boundaries */
lmt.srt+=lmt.srd-1L-lmt.rec_skp_vld_prv%lmt.srd;
if(lmt.srt > lmt.end){
/* Do not allow record dimension wrapping in MFOs */
flg_no_data_ok=True;
goto no_data_ok;
} /* endif */
} /* endif */
/* If we are here then there are valid records in current file */
} /* end if rec_dmn_and_mfo */
}else{ /* end if limit arguments were coordinate values */
/* Convert limit strings to zero-based indicial offsets */
/* Specifying stride alone, but not min or max, is legal, e.g., -d time,,,2
Thus is_usr_spc_lmt may be True, even though one or both of min_sng, max_sng is NULL
Furthermore, both min_sng and max_sng are artificially created by nco_lmt_sct_mk()
for record dimensions when the user does not explicitly specify limits.
In this case, min_sng and max_sng are non-NULL though no limits were specified
In fact, min_sng and max_sng are set to the minimum and maximum string
values of the first file processed.
However, we can tell if these strings were artificially generated because
nco_lmt_sct_mk() sets the is_usr_spc_lmt flag to False in such cases.
Subsequent files may have different numbers of records, but nco_lmt_sct_mk()
is only called once.
Thus we must update min_idx and max_idx here for each file
This causes min_idx and max_idx to be out of sync with min_sng and max_sng,
which are only set in nco_lmt_sct_mk() for the first file.
In hindsight, artificially generating min_sng and max_sng may be bad idea */
/* Following logic is messy, but hard to simplify */
if(!lmt.min_sng || !lmt.is_usr_spc_lmt){
/* No user-specified value available--generate minimal dimension index */
if(FORTRAN_IDX_CNV) lmt.min_idx=1L; else lmt.min_idx=0L;
}else{
/* Use user-specified limit when available */
lmt.min_idx=strtol(lmt.min_sng,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.min_sng,"strtol",sng_cnv_rcd);
} /* end if */
if(!lmt.max_sng || !lmt.is_usr_spc_lmt){
/* No user-specified value available---generate maximal dimension index */
if(FORTRAN_IDX_CNV) lmt.max_idx=dmn_sz; else lmt.max_idx=dmn_sz-1L;
}else{
/* Use user-specified limit when available */
lmt.max_idx=strtol(lmt.max_sng,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.max_sng,"strtol",sng_cnv_rcd);
} /* end if */
/* Adjust indices if FORTRAN style input was specified */
if(FORTRAN_IDX_CNV){
/* 20120726: Die when Fortran index is zero */
if(lmt.min_idx == 0L || lmt.max_idx == 0L){
(void)fprintf(stdout,"%s: ERROR User-specified Fortran (1-based) index for dimension %s = 0.\n",nco_prg_nm_get(),lmt.nm);
msg_sng=strdup("Fortran indices must be >= 1");
NCO_SYNTAX_ERROR=True;
} /* endif illegal Fortran index */
/* 20120709: Adjust positive indices only */
if(lmt.min_idx > 0L) lmt.min_idx--;
if(lmt.max_idx > 0L) lmt.max_idx--;
} /* end if */
/* Negative integer as min or max element of hyperslab specification indicates offset from end
pharaohs--20120708 Negative integers produce domain error
20120709--20141001 Negative integer is elements away from last element, e.g., -1 is penultimate element
20141002--forever -1 is last element, e.g., -2 is penultimate element, -N is first element (Python convention) */
nco_bool flg_old_usg=False;
if(lmt.min_idx == 0L && lmt.min_sng)
if(lmt.min_sng[0] == '-')
flg_old_usg=True;
if(lmt.max_idx == 0L && lmt.max_sng)
if(lmt.max_sng[0] == '-')
flg_old_usg=True;
if(flg_old_usg) (void)fprintf(stdout,"%s: WARNING Only NCO 4.4.6 treats negative zero as the last element of a dimension. Beginning 20141002, NCO uses the Python convention where negative one is the last element of a dimension, and negative zero is the same as zero and so selects the first element of a dimension. Negative zero also causes this warning to be printed in case the 4.4.6 behavior was intended.\n",nco_prg_nm_get());
if(lmt.min_idx < 0L) lmt.min_idx+=dmn_sz;
if(lmt.max_idx < 0L) lmt.max_idx+=dmn_sz;
/* Exit if requested indices are invalid for all operators... */
if(lmt.min_idx < 0L){
msg_sng=strdup("Minimum index is too negative");
NCO_SYNTAX_ERROR=True;
}else if(lmt.max_idx < 0L){
msg_sng=strdup("Maximum index is too negative");
NCO_SYNTAX_ERROR=True;
}else if(lmt.ilv > 1L && lmt.ssc > lmt.srd){
(void)fprintf(stdout,"%s: ERROR User-specified subcycle exceeds stride for dimension %s: %li > %li\n",nco_prg_nm_get(),lmt.nm,lmt.ssc,lmt.srd);
msg_sng=strdup("Subcycle exceeds stride");
NCO_SYNTAX_ERROR=True;
}else if(lmt.ssc % lmt.ilv != 0L){
(void)fprintf(stdout,"%s: ERROR Interleave stride must evenly divide group size for dimension %s: %li %% %li = %li != 0\n",nco_prg_nm_get(),lmt.nm,lmt.ssc,lmt.ilv,lmt.ssc%lmt.ilv);
msg_sng=strdup("Interleave stride does not evenly divide sub-cycle length");
NCO_SYNTAX_ERROR=True;
}else if(!rec_dmn_and_mfo && lmt.min_idx >= dmn_sz){
msg_sng=strdup("Minimum index greater than size in non-MFO");
NCO_SYNTAX_ERROR=True;
(void)fprintf(stdout,"%s: ERROR User-specified dimension index range %li <= %s <= %li does not fall within valid dimension index range 0 <= %s <= %li\n",nco_prg_nm_get(),lmt.min_idx,lmt.nm,lmt.max_idx,lmt.nm,dmn_sz-1L);
} /* end if impossible indices */
if(NCO_SYNTAX_ERROR){
(void)fprintf(stdout,"%s: ERROR evaluating hyperslab specification for %s: %s\n%s: HINT Conform request to hyperslab documentation at http://nco.sf.net/nco.html#hyp\n",nco_prg_nm_get(),lmt.nm,msg_sng,nco_prg_nm_get());
msg_sng=(char *)nco_free(msg_sng);
nco_exit(EXIT_FAILURE);
} /* !NCO_SYNTAX_ERROR */
/* NB: Subcycle is officially supported only for ncra and ncrcat (record dimension only) */
if(lmt.ssc != 1L && !rec_dmn_and_mfo) (void)fprintf(stderr,"%s: WARNING Subcycle argument is only supported for the record dimension on ncra and ncrcat operations\n",nco_prg_nm_get());
/* Logic depends on whether this is record dimension in multi-file operator */
if(!rec_dmn_and_mfo || !lmt.is_usr_spc_lmt){
/* For non-record dimensions and for record dimensions where limit
was automatically generated (to include whole file), starting
and ending indices are simply minimum and maximum indices already
in structure */
lmt.srt=lmt.min_idx;
lmt.end=lmt.max_idx;
}else{
/* Initialize rec_skp_vld_prv to 0L on first call to nco_lmt_evl()
This is necessary due to intrinsic hysteresis of rec_skp_vld_prv
rec_skp_vld_prv is used only by multi-file operators
rec_skp_vld_prv counts records skipped at end of previous valid file
rec_usd_cml and rec_skp_ntl_spf are both zero only for first file */
if(rec_usd_cml == 0L && lmt.rec_skp_ntl_spf == 0L) lmt.rec_skp_vld_prv=0L;
/* For record dimensions with user-specified limits, allow possibility
that limits pertain to record dimension in a multi-file operator.
Then user-specified maximum index may exceed number of records in any one file
Thus lmt.srt does not necessarily equal lmt.min_idx and
lmt.end does not necessarily equal lmt.max_idx */
/* NB: Stride is officially supported for ncks (all dimensions) and for ncra and ncrcat (record dimension only) */
if(lmt.srd != 1L && nco_prg_id != ncks && !lmt.is_rec_dmn) (void)fprintf(stderr,"%s: WARNING Stride argument for non-record dimension is only supported by ncks, use at your own risk...\n",nco_prg_nm_get());
{ /* Block hides scope of local internal variables */
long srt_min_lcl; /* [idx] Minimum start index (in absolute index space, i.e., relative to first file) for current file */
long end_max_lcl; /* [idx] Maximum end index (in absolute index space, i.e., relative to first file) for current file */
srt_min_lcl=(lmt.is_usr_spc_min ? lmt.min_idx : lmt.rec_in_cml+0L);
end_max_lcl=(lmt.is_usr_spc_max ? lmt.max_idx : lmt.rec_in_cml+dmn_sz-1L);
/* Maximum allowed index in record dimension */
lmt.idx_end_max_abs=end_max_lcl;
/* Have we reached file containing srt_min_lcl yet? */
if(srt_min_lcl > lmt.rec_in_cml+dmn_sz-1L){
/* This and all previous files are superfluous because the starting record is in a subsequent file */
flg_no_data_ok=True;
goto no_data_ok;
} /* endif srt_min_lcl in future file */
/* Until records have been used, start index is srt_min_lcl adjusted for records contained in all previous files
Thereafter start index loses memory of/dependence on absolute start index, and only cares for how many records,
if any, were skipped since last valid record. This number, modulo stride, is new start index. */
if(rec_usd_cml == 0L) lmt.srt=srt_min_lcl-lmt.rec_in_cml; else lmt.srt=lmt.srd-1L-lmt.rec_skp_vld_prv%lmt.srd;
if(lmt.srt > dmn_sz-1L){
/* Perhaps data were read in previous file(s) yet next record is in future file due to long stride */
flg_no_data_ok=True;
goto no_data_ok;
} /* endif */
lmt.end=(end_max_lcl < lmt.rec_in_cml+dmn_sz) ? end_max_lcl-lmt.rec_in_cml : dmn_sz-1L;
/* If lmt.srt <= lmt.end then there are (may be?) valid records in current file
If lmt.srt > lmt.end then all desired data (for this dimension) were definitely read in previous file(s)
This happens when user-specified lmt.max_idx is not desired (because min_idx+N*stride skips over it)
Then we would first find out right here that all desired data have been read
Test for end_max_lcl above does not catch this case because we are _in_ (not beyond) file with end_max_lcl */
/* Are we past file containing end_max_lcl yet? */
if(end_max_lcl < lmt.rec_in_cml){
/* This and all subsequent files are superfluous because all requested records have already been read
Optimize MFOs by checking "input complete" flag to jump out of file loop
Saves time because no other input files will be opened */
lmt.flg_input_complete=True;
flg_no_data_ok=True;
goto no_data_ok;
} /* endif past end_max_lcl */
if((end_max_lcl < lmt.rec_in_cml) || /* Are we past file containing end_max_lcl yet? */
(lmt.srt > lmt.end)){ /* Does stride put first index beyond last possible index? */
/* This and all subsequent files are superfluous because all requested records have already been read
Optimize MFOs by checking "input complete" flag to jump out of file loop
Saves time because no other input files will be opened */
lmt.flg_input_complete=True;
flg_no_data_ok=True;
goto no_data_ok;
} /* endif already past end_max_lcl or will stride over end_max_lcl */
} /* end block hides scope of local internal variables */
/* If we are here then there are valid records in current file */
} /* endif user-specified limits to record dimension */
} /* end else limit arguments are hyperslab indices */
/* NB: MFO record dimension never reaches this block if current file is superfluous
In that case code has already branched down to flg_data_ok or flg_data_err */
if(rec_dmn_and_mfo){
/* NB: This is---and must be---performed as integer arithmetic */
cnt_rmn_crr=1L+(lmt.end-lmt.srt)/lmt.srd;
/* This fixes "sloppy" specification of end index by user, i.e., ensures that end index coincides with a stride */
lmt.end=lmt.srt+(cnt_rmn_crr-1L)*lmt.srd;
/* Save current rec_skp_vld_prv for diagnostics (printed below) for this file */
rec_skp_vld_prv_dgn=lmt.rec_skp_vld_prv;
/* Next file must know how many records in this file come after (and thus will be skipped) last used record in this file */
lmt.rec_skp_vld_prv=dmn_sz-1L-lmt.end;
} /* !rec_dmn_and_mfo */
/* Compute cnt from srt, end, and srd
This is misleading though fine for multi-file record dimensions since those operators always
read-in and write-out single records and thus never actually use lmt.cnt for record dimension. */
if(lmt.srd == 1L){
if(lmt.srt <= lmt.end) lmt.cnt=lmt.end-lmt.srt+1L; else lmt.cnt=dmn_sz-lmt.srt+lmt.end+1L;
}else{
if(lmt.flg_ilv){
/* In ILV mode in valid ILV files (which are evenly aligned so groups never cross files)
all records between srt and end for one sub-cycle are valid though not sequential
There are no gaps between valid records in a sub-cycle and consecutive groups are interleaved
Hence the effective "stride", in terms of the cnt measure, is always 1L for ILV
When stride exceeds one then sub-cycles have intervening space
Remember, though, that cnt here is diagnostic and is not used in ncra, ncrcat
20200721: diagnostic cnt here should be modified to account for ssc stride */
if(lmt.srt <= lmt.end) lmt.cnt=lmt.end-lmt.srt+1L; else lmt.cnt=1L+(dmn_sz-lmt.srt)+lmt.end;
}else{ /* !lmt.flg_ilv */
if(lmt.srt <= lmt.end) lmt.cnt=1L+(lmt.end-lmt.srt)/lmt.srd; else lmt.cnt=1L+((dmn_sz-lmt.srt)+lmt.end)/lmt.srd;
} /* !lmt.flg_ilv */
} /* end else */
/* NB: Degenerate cases of WRP && SRD exist for which dmn_cnt_2 == 0
This occurs when srd is large enough, or max_idx small enough,
such that no values are selected in the second read.
e.g., "-d lon,60,0,10" if sz(lon)=128 has dmn_cnt_2 == 0
Since netCDF library reports an error reading and writing cnt=0 dimensions, kludge is necessary
Syntax ensures that it is always the second read, not the first, which is obviated
Therefore we convert these degenerate cases into non-wrapped coordinates to be processed by single read
For these degenerate cases only, [srt,end] are not a permutation of [min_idx,max_idx] */
if(
(lmt.srd != 1L) && /* SRD */
(lmt.srt > lmt.end) && /* WRP */
(lmt.cnt == (1L+(dmn_sz-lmt.srt-1L)/lmt.srd)) && /* dmn_cnt_1 == cnt -> dmn_cnt_2 == 0 */
True){
long greatest_srd_multiplier_1st_hyp_slb; /* Greatest integer m such that srt+m*srd < dmn_sz */
long last_good_idx_1st_hyp_slb; /* C-index of last valid member of 1st hyperslab (= srt+m*srd) */
/* long left_over_idx_1st_hyp_slb;*/ /* # of elements from first hyperslab that count towards current stride */
long first_good_idx_2nd_hyp_slb; /* C-index of first valid member of 2nd hyperslab, if any */
/* NB: Perform these operations with integer arithmetic or else! */
/* Wrapped dimensions with stride may not start at idx 0 on second read */
greatest_srd_multiplier_1st_hyp_slb=(dmn_sz-lmt.srt-1L)/lmt.srd;
last_good_idx_1st_hyp_slb=lmt.srt+lmt.srd*greatest_srd_multiplier_1st_hyp_slb;
/* left_over_idx_1st_hyp_slb=dmn_sz-last_good_idx_1st_hyp_slb-1L;*/
first_good_idx_2nd_hyp_slb=(last_good_idx_1st_hyp_slb+lmt.srd)%dmn_sz;
/* Conditions causing dmn_cnt_2 == 0 */
if(first_good_idx_2nd_hyp_slb > lmt.end) lmt.end=last_good_idx_1st_hyp_slb;
} /* end if */
/* Cases where domain brackets no data, in error, have counts set to zero here
This kludge allows codepaths for both WRP and out-of-domain to flow without goto statements
Out-of-domain errors will soon exit with error, while WRP conditions will proceed */
if(flg_no_data_err) lmt.cnt=0L;
/* Exit when valid bracketed range contains no coordinates and this is not a superfluous file in an MFO */
if(lmt.cnt == 0){
if(lmt.lmt_typ == lmt_crd_val || lmt.lmt_typ == lmt_udu_sng){
(void)fprintf(stdout,"%s: ERROR %s reports domain %15.9e <= %s <= %15.9e brackets no coordinate values\n",nco_prg_nm_get(),fnc_nm,lmt.min_val,lmt.nm,lmt.max_val);
if(lmt.min_sng) (void)fprintf(stdout,"%s: INFO user-specified coordinate minimum: \"%s\"\n",nco_prg_nm_get(),lmt.min_sng);
if(lmt.max_sng) (void)fprintf(stdout,"%s: INFO user-specified coordinate maximum: \"%s\"\n",nco_prg_nm_get(),lmt.max_sng);
} /* !lmt_typ */
if(lmt.lmt_typ == lmt_dmn_idx) (void)fprintf(stdout,"%s: ERROR Indices bracket empty domain for %s\n",nco_prg_nm_get(),lmt.nm);
nco_exit(EXIT_FAILURE);
} /* !lmt.cnt */
/* Coordinate-valued limits that bracket no values in current file jump here with goto
Index-valued limits with no values in current file flow here naturally */
no_data_ok: /* end goto */
if(flg_no_data_ok){
/* File is superfluous (contributes no data) to specified hyperslab
Set output parameters to well-defined state
This state must not cause ncra or ncrcat to retrieve any data
ncra and ncrcat use loops for the record dimension, so this is
accomplished by setting loop control values (lmt_rec.srt > lmt_rec.end)
that cause record loop always to be skipped (never entered) */
lmt.srt=-1L;
lmt.end=lmt.srt-1L;
lmt.cnt=-1L;
/* Augment number of records skipped in initial superfluous files */
if(rec_usd_cml == 0L) lmt.rec_skp_ntl_spf+=dmn_sz;
/* Augment records skipped since last good one */
lmt.rec_skp_vld_prv+=dmn_sz;
/* Set variables to preserve utility of diagnostics at end of routine */
cnt_rmn_crr=rec_skp_vld_prv_dgn=0L;
} /* endif */
/* Accumulate count of records in all opened files, including this one
Increment here at end so this structure member includes records from current file
only at end of this routine, where it can only be used diagnostically
NB: Location of this augmentation is important! Moving it would have side-effects!
Consult CSZ before doing so */
lmt.rec_in_cml+=dmn_sz;
/* Index juggling only used for interleaved option in ncra/ncrcat */
if(lmt.flg_ilv){
/* 20200716: Assume input files to ILV operations align on even interleaved boundaries, i.e.,
(end - srt + 1) % srd == 0
nco_lmt_evl() currently sets lmt.end to last valid index of first interleaved index
Could adjust lmt.end to last valid index of last interleaved index
That way lmt.end would truly reflect index of last record desired in file
However, SSC works well with current convention so stick with it to leverage SSC for ILV
This implies additional condition that
ssc*srd == end - srt + 1L (Old API)
We thus prescribe SSC for ILV based on srt, end, and srd:
lmt.ssc=(lmt.end+lmt.srd)/lmt.srd;
20200721: New API
--ilv,ilv -d time,srt,end,srd,ssc
Hence sets ilv separately from srd, and ssc must be set explicitly and consistently
Rules:
1. ssc % ilv = 0 # Interleave stride must evenly divide group size
Old Rule: This rule was imposed in old API, though is unnecessary in new API
Actual new rule is much more relaxed: sub-cycles cannot cross between files
2. (end - srt + 1) % ssc = 0 # Group size must evenly divide file size when ilv > 1 */
assert(lmt.ssc % lmt.ilv == 0L);
} /* !lmt.flg_ilv */
if(nco_dbg_lvl_get() >= nco_dbg_fl && lmt.flg_ilv){
(void)nco_prn_lmt(lmt,min_lmt_typ,FORTRAN_IDX_CNV,flg_no_data_ok,rec_usd_cml,monotonic_direction,rec_dmn_and_mfo,cnt_rmn_ttl,cnt_rmn_crr,rec_skp_vld_prv_dgn);
} /* end dbg */
if(lmt.srt > lmt.end && !flg_no_data_ok){
if(nco_prg_id != ncks) (void)fprintf(stderr,"WARNING: Possible instance of Schweitzer data hole requiring better diagnostics TODO #148\n");
if(nco_prg_id != ncks) (void)fprintf(stderr,"HINT: If operation fails, try multislabbing (http://nco.sf.net/nco.html#msa) wrapped dimension using ncks first, and then apply %s to the resulting file\n",nco_prg_nm_get());
} /* end dbg */
/* Place contents of working structure in location of returned structure */
*lmt_ptr=lmt;
fl_udu_sng=(char *)nco_free(fl_udu_sng);
} /* !nco_lmt_evl() */
void
nco_lmt_evl_dmn_crd /* [fnc] Parse user-specified limits into hyperslab specifications */
(const int nc_id, /* I [ID] netCDF file ID */
long rec_usd_cml, /* I [nbr] Number of valid records already processed (only used for record dimensions in multi-file operators) */
nco_bool FORTRAN_IDX_CNV, /* I [flg] Hyperslab indices obey Fortran convention */
const char * const grp_nm_fll,/* I [sng] Full group name (dimension or coordinate) */
const char * const nm, /* I [sng] Name (dimension or coordinate) */
const size_t sz, /* I [nbr] Size (dimension or coordinate) */
const nco_bool is_rec, /* I [flg] Is a record (dimension or coordinate) */
const nco_bool is_crd, /* I [flg] Is a coordinate variable */
lmt_sct *lmt_ptr) /* I/O [sct] Structure from nco_lmt_prs() in input, filled on output */
{
/* Purpose: Take parsed list of dimension names, minima, and maxima strings
and find appropriate indices into dimensions for formulation of
dimension start and count vectors, or fail trying.
Based on original nco_lmt_evl(). Used for both dimensions and coordinate variables.
Use case example:
/lon(4)
/g8/lon(2)
ncks -d lon,0,3,1 -v lon -H ~/nco/data/in_grp.nc
"-d lon,0,3,1" is valid for /lon(4) but not for /g8/lon(2)
Reminder:
Coordinate values should be specified using real notation with a decimal point required in the value,
whereas dimension indices are specified using integer notation without a decimal point.
ncks -d lat,-90.,90.,1 -H -v area ~/nco/data/in_grp.nc # limit type is defined as lmt_crd_val
ncks -d lat,0,1,1 -H -v area ~/nco/data/in_grp.nc # limit type is defined as lmt_dmn_idx
lmt_crd_val, 0, Coordinate value limit
lmt_dmn_idx, 1, Dimension index limit
lmt_udu_sng 2, UDUnits string
Tests:
ncks -D 11 -d lon,0.,90.,1 -v lon -H ~/nco/data/in_grp.nc
ncks -D 11 -d lon,0,1,1 -v lon -H ~/nco/data/in_grp.nc */
const char fnc_nm[]="nco_lmt_evl_dmn_crd()";
char *fl_udu_sng=NULL_CEWI; /* [sng] Store units attribute of coordinate dimension */
char *msg_sng=NULL_CEWI; /* [sng] Error message */
char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */
nco_bool flg_no_data_err=False; /* [flg] True if domain brackets no data (and not an MFO/record coordinate) */
nco_bool flg_no_data_ok=False; /* [flg] True if file contains no data for hyperslab */
nco_bool rec_dmn_and_mfo=False; /* [flg] True if record dimension in multi-file operator */
nco_bool NCO_SYNTAX_ERROR=False;/* [flg] Syntax error in hyperslab specification */
dmn_sct dim; /* [sct] Dimension Structure */
lmt_sct lmt; /* [sct] Structure from nco_lmt_prs() */
int min_lmt_typ=int_CEWI;
int max_lmt_typ=int_CEWI;
int nco_prg_id; /* [enm] Program ID */
monotonic_direction_enm monotonic_direction=not_checked; /* CEWI */
size_t dmn_sz; /* [nbr] Dimension size */
long cnt_rmn_crr=-1L; /* [nbr] Records to extract from current file */
long cnt_rmn_ttl=-1L; /* [nbr] Total records to be read from this and all remaining files */
long rec_skp_vld_prv_dgn=-1L; /* [nbr] Records skipped at end of previous valid file, if any (diagnostic only) */
int var_id=-1; /* [id] ID of variable */
int grp_id=-1; /* [id] ID of group */
nc_type var_typ=NC_NAT; /* [enm] Type of variable */
lmt=*lmt_ptr;
nco_prg_id=nco_prg_id_get();
/* Initialize limit structure */
lmt.flg_mro=False;
lmt.flg_mso=False;
lmt.max_val=0.0;
lmt.min_val=0.0;
lmt.ssc=1L;
lmt.srd=1L;
lmt.flg_input_complete=False;
/* Obtain group ID */
(void)nco_inq_grp_full_ncid(nc_id,grp_nm_fll,&grp_id);
/* Use parameter to inquire about coordinate.
NB: There might be cases where a variable with the same name as dimension exists,
but it is not a "real" 1-D coordinate. Coordinates must be 1D.
Use case:
ncks -O -v ts -d time,0,1 -d Latitude,40.0 -d Longitude,-105.0 http://hydro1.sci.gsfc.nasa.gov/opendap/hyrax/ncml/LPRM_AMSRE_D_SOILM3_timeSeries.ncml amsre.nc */
if(is_crd){
/* Obtain coordinate variable ID */
(void)nco_inq_varid(grp_id,nm,&var_id);
/* Get coordinate type */
(void)nco_inq_vartype(grp_id,var_id,&var_typ);
} /* !is_crd */
/* Use info from parameter to assign locally used size */
dmn_sz=sz;
/* Use info from parameter to assign record/not record to limit */
lmt.is_rec_dmn=is_rec;
/* Logic on whether to allow skipping current file depends on whether limit
is specified for record dimension in multi-file operators.
This information is not used in single-file operators, though whether
the limit is a record limit may be tested.
Program defensively and define this flag in all cases. */
if(lmt.is_rec_dmn && (nco_prg_id == ncra || nco_prg_id == ncrcat)) rec_dmn_and_mfo=True; else rec_dmn_and_mfo=False;
if(rec_dmn_and_mfo){
lmt.rec_dmn_sz=dmn_sz;
lmt.idx_end_max_abs=lmt.rec_in_cml+dmn_sz-1L; /* Maximum allowed index in record dimension */
} /* !rec_dmn_and_mfo */
/* Bomb if dmn_sz < 1 */
if(dmn_sz < 1L){
(void)fprintf(stdout,"%s: ERROR Size of dimension %s is %li in input file, but must be > 0 in order to apply limits.\n",nco_prg_nm_get(),lmt.nm,dmn_sz);
nco_exit(EXIT_FAILURE);
} /* end if */
if(lmt.srd_sng){
if(strchr(lmt.srd_sng,'.') || strchr(lmt.srd_sng,'e') || strchr(lmt.srd_sng,'E') || strchr(lmt.srd_sng,'d') || strchr(lmt.srd_sng,'D')){
(void)fprintf(stdout,"%s: ERROR Requested stride for %s, %s, must be integer\n",nco_prg_nm_get(),lmt.nm,lmt.srd_sng);
nco_exit(EXIT_FAILURE);
} /* end if */
lmt.srd=strtol(lmt.srd_sng,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.srd_sng,"strtol",sng_cnv_rcd);
if(lmt.srd < 1L){
(void)fprintf(stdout,"%s: ERROR Stride for %s is %li but must be > 0\n",nco_prg_nm_get(),lmt.nm,lmt.srd);
nco_exit(EXIT_FAILURE);
} /* end if */
} /* !lmt.srd_sng */
if(lmt.ssc_sng){
if(strchr(lmt.ssc_sng,'.') || strchr(lmt.ssc_sng,'e') || strchr(lmt.ssc_sng,'E') || strchr(lmt.ssc_sng,'d') || strchr(lmt.ssc_sng,'D')){
(void)fprintf(stdout,"%s: ERROR Requested subcycle argument for %s, %s, must be integer\n",nco_prg_nm_get(),lmt.nm,lmt.ssc_sng);
nco_exit(EXIT_FAILURE);
} /* end if */
lmt.ssc=strtol(lmt.ssc_sng,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.ssc_sng,"strtol",sng_cnv_rcd);
if(lmt.ssc < 1L){
(void)fprintf(stdout,"%s: ERROR Subcycle argument for %s is %li but must be > 0\n",nco_prg_nm_get(),lmt.nm,lmt.ssc);
nco_exit(EXIT_FAILURE);
} /* end if */
if(nco_prg_id != ncra && nco_prg_id != ncrcat){
(void)fprintf(stdout,"%s: ERROR Subcycle hyperslabs only implemented for ncra and ncrcat\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* end ncra */
} /* !lmt.ssc_sng */
if(lmt.ilv_sng){
if(strchr(lmt.ilv_sng,'.') || strchr(lmt.ilv_sng,'e') || strchr(lmt.ilv_sng,'E') || strchr(lmt.ilv_sng,'d') || strchr(lmt.ilv_sng,'D')){
(void)fprintf(stdout,"%s: ERROR Requested interleave stride argument for %s, %s, must be integer\n",nco_prg_nm_get(),lmt.nm,lmt.ilv_sng);
nco_exit(EXIT_FAILURE);
} /* end if */
lmt.ilv=strtol(lmt.ilv_sng,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.ilv_sng,"strtol",sng_cnv_rcd);
if(lmt.ilv < 1L){
(void)fprintf(stdout,"%s: ERROR Interleave stride argument for %s is %li but must be > 0\n",nco_prg_nm_get(),lmt.nm,lmt.ilv);
nco_exit(EXIT_FAILURE);
} /* end if */
if(nco_prg_id != ncra && nco_prg_id != ncrcat){
(void)fprintf(stdout,"%s: ERROR Interleave stride hyperslabs only implemented for ncra and ncrcat\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* end ncra */
if(lmt.ilv > 1L){
lmt.flg_ilv=True;
//(void)fprintf(stdout,"%s: quark2 lmt.ilv_sng = %s, lmt.ilv = %ld, flg_ilv = %s\n",nco_prg_nm_get(),lmt.ilv_sng == NULL ? "NULL" : lmt.ilv_sng,lmt.ilv,lmt.flg_ilv ? "YES" : "NO");
} /* !lmt.ilv */
} /* !lmt.ilv_sng */
/* In case flg_mro is set in ncra.c by --mro */
if(lmt.flg_mro){
if(nco_prg_id == ncrcat){
(void)fprintf(stdout,"%s: INFO Specifying Multi-Record Output (MRO) option (--mro) is redundant. MRO is always true for ncrcat.\n",nco_prg_nm_get());
}else if(nco_prg_id != ncra){
(void)fprintf(stdout,"%s: ERROR Multi-Record Output (MRO) option (--mro) is only valid for ncra.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* end else */
} /* !lmt.flg_mro */
/* In case flg_mso is set in ncra.c by --mso */
if(lmt.flg_mso){
if(nco_prg_id == ncrcat){
(void)fprintf(stdout,"%s: INFO Specifying Multi-Subcycle Output (MSO) option (--mso) is redundant. MSO is always true for ncrcat.\n",nco_prg_nm_get());
}else if(nco_prg_id != ncra){
(void)fprintf(stdout,"%s: ERROR Multi-Subcycle Output (MSO) option (--mso) is only valid for ncra.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* end else */
} /* !lmt.flg_mso */
/* 20200721 Context-sensitive argument inferral makes default (blank) arguments more useful
Order and mutual-exclusivity of these conditions is important */
if(lmt.ilv_sng && !lmt.ssc_sng && !lmt.srd_sng){
lmt.ssc=lmt.ilv;
lmt.srd=lmt.ssc;
}else if(lmt.ilv_sng && !lmt.ssc_sng){
lmt.ssc=lmt.ilv;
}else if(lmt.ilv_sng && !lmt.srd_sng){
lmt.srd=lmt.ssc;
}else if(lmt.ssc_sng && !lmt.srd_sng){
lmt.srd=lmt.ssc;
} /* lmt.ilv */
/* Set MRO whenever interleave is explicitly requested */
if(lmt.ilv_sng) lmt.flg_mro=True;
/* Set MSO whenever interleave is explicitly requested */
if(lmt.ilv_sng) lmt.flg_mso=True;
/* If min_sng and max_sng are both NULL then set type to lmt_dmn_idx */
if(lmt.min_sng == NULL && lmt.max_sng == NULL){
/* Limiting indices will be set to default extrema a bit later */
min_lmt_typ=max_lmt_typ=lmt_dmn_idx;
}else{
/* min_sng and max_sng are not both NULL */
/* Limit is coordinate value if string contains decimal point or is in exponential format
Otherwise limit is interpreted as zero-based dimension offset */
if(lmt.min_sng) min_lmt_typ=nco_lmt_typ(lmt.min_sng);
if(lmt.max_sng) max_lmt_typ=nco_lmt_typ(lmt.max_sng);
/* Copy lmt_typ from defined limit to undefined */
if(lmt.min_sng == NULL) min_lmt_typ=max_lmt_typ;
if(lmt.max_sng == NULL) max_lmt_typ=min_lmt_typ;
} /* end else */
/* Both min_lmt_typ and max_lmt_typ are now defined
Continue only if both limits are of the same type */
if(min_lmt_typ != max_lmt_typ){
(void)fprintf(stdout,"%s: ERROR -d %s,%s,%s\n",nco_prg_nm_get(),lmt.nm,lmt.min_sng,lmt.max_sng);
(void)fprintf(stdout,"Limits on dimension \"%s\" must be of same numeric type:\n",lmt.nm);
(void)fprintf(stdout,"\"%s\" was interpreted as a %s.\n",lmt.min_sng,((min_lmt_typ == lmt_crd_val) || (min_lmt_typ == lmt_udu_sng)) ? "coordinate value" : (FORTRAN_IDX_CNV) ? "one-based dimension index" : "zero-based dimension index");
(void)fprintf(stdout,"\"%s\" was interpreted as a %s.\n",lmt.max_sng,((max_lmt_typ == lmt_crd_val) || (max_lmt_typ == lmt_udu_sng)) ? "coordinate value" : (FORTRAN_IDX_CNV) ? "one-based dimension index" : "zero-based dimension index");
(void)fprintf(stdout,"(Limit arguments containing a decimal point (or in exponential format) are interpreted as coordinate values; arguments without a decimal point are interpreted as zero-based or one-based (depending on -F switch) dimensional indices.)\n");
nco_exit(EXIT_FAILURE);
} /* end if */
lmt.lmt_typ=min_lmt_typ;
/* Coordinate re-basing code */
lmt.origin=0.0;
/* If there is a coordinate variable */
if(is_crd){
/* Get variable ID of coordinate */
(void)nco_inq_varid(grp_id,lmt.nm,&dim.cid);
/* NOTE(review): comment previously claimed this ensures the variable is 1-D (a true coordinate), but the following line only repeats the varid inquiry above — no dimensionality check is actually performed here; confirm intended check */
(void)nco_inq_varid(grp_id,lmt.nm,&dim.cid);
char *cln_sng=NULL_CEWI;
fl_udu_sng=nco_lmt_get_udu_att(grp_id,var_id,"units"); /* Units attribute of coordinate variable */
cln_sng=nco_lmt_get_udu_att(grp_id,var_id,"calendar"); /* Calendar attribute */
if(rec_dmn_and_mfo && fl_udu_sng && lmt.rbs_sng){
#ifdef ENABLE_UDUNITS
/* Re-base and reset origin to 0.0 if re-basing fails */
// if(nco_cln_clc_org(fl_udu_sng,lmt.rbs_sng,lmt.cln_typ,&lmt.origin) != NCO_NOERR) lmt.origin=0.0;
#endif /* !ENABLE_UDUNITS */
} /* endif */
/* ncra and ncrcat read the "calendar" attribute in main()
Avoid multiple reads of calendar attribute in multi-file operations */
if(!rec_dmn_and_mfo){
if(cln_sng) lmt.cln_typ=nco_cln_get_cln_typ(cln_sng); else lmt.cln_typ=cln_nil;
} /* endif */
if(cln_sng) cln_sng=(char *)nco_free(cln_sng);
} /* End Needed only to read variable, if dimension is a coordinate variable */
if((lmt.lmt_typ == lmt_crd_val) || (lmt.lmt_typ == lmt_udu_sng)){
double *dmn_val_dp=NULL;
double dmn_max;
double dmn_min;
long max_idx;
long min_idx;
long tmp_idx;
size_t dmn_srt=0L;
/* Warn when coordinate type is weird */
if(var_typ == NC_BYTE || var_typ == NC_UBYTE || var_typ == NC_CHAR || var_typ == NC_STRING) (void)fprintf(stderr,"\n%s: WARNING Coordinate %s is type %s. Dimension truncation is unpredictable.\n",nco_prg_nm_get(),lmt.nm,nco_typ_sng(var_typ));
/* Allocate enough space to hold coordinate */
dmn_val_dp=(double *)nco_malloc(dmn_sz*nco_typ_lng(NC_DOUBLE));
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
{ /* begin OpenMP critical */
/* Block is critical for identical in_id's
Block is thread-safe for distinct in_id's */
/* 20110221: replace nco_get_vara() with nc_get_vara_double() */
/* Retrieve this coordinate */
int rcd;
rcd=nc_get_vara_double(grp_id,var_id,&dmn_srt,&dmn_sz,dmn_val_dp);
if(rcd != NC_NOERR) (void)fprintf(stdout,"%s: ERROR %s unable to read user-specified coordinate %s. Ensure this coordinate variable is in file and is a 1-D array.\n",nco_prg_nm_get(),fnc_nm,lmt.nm);
if(rcd != NC_NOERR) nco_err_exit(rcd,"nc_get_vara_double()");
} /* end OpenMP critical */
/* Officially change type */
var_typ=NC_DOUBLE;
/* Assuming coordinate is monotonic, direction of monotonicity is determined by first two elements */
if(dmn_sz == 1L){
monotonic_direction=increasing;
}else{
if(dmn_val_dp[0] > dmn_val_dp[1]) monotonic_direction=decreasing; else monotonic_direction=increasing;
} /* end else */
if(monotonic_direction == increasing){
min_idx=0L;
max_idx=dmn_sz-1L;
}else{
min_idx=dmn_sz-1L;
max_idx=0L;
} /* end else */
/* Determine min and max values of entire coordinate */
dmn_min=dmn_val_dp[min_idx];
dmn_max=dmn_val_dp[max_idx];
/* Set defaults */
lmt.min_val=dmn_val_dp[min_idx];
lmt.max_val=dmn_val_dp[max_idx];
/* Convert UDUnits strings if necessary */
/* If we are here then either min_sng or max_sng or both are set */
if(lmt.lmt_typ == lmt_udu_sng){
if(!fl_udu_sng){
(void)fprintf(stdout,"%s: ERROR attempting to read units attribute from variable \"%s\" \n",nco_prg_nm_get(),lmt.nm);
nco_exit(EXIT_FAILURE);
} /* end if */
if(lmt.min_sng)
if(nco_cln_clc_dbl_org(lmt.min_sng,fl_udu_sng,lmt.cln_typ,&lmt.min_val) != NCO_NOERR)
nco_exit(EXIT_FAILURE);
if(lmt.max_sng)
if(nco_cln_clc_dbl_org(lmt.max_sng,fl_udu_sng,lmt.cln_typ,&lmt.max_val) != NCO_NOERR)
nco_exit(EXIT_FAILURE);
}else{ /* end UDUnits conversion */
/* Convert user-specified limits into double precision numeric values, or supply defaults */
if(lmt.min_sng){
lmt.min_val=strtod(lmt.min_sng,&sng_cnv_rcd);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.min_sng,"strtod",sng_cnv_rcd);
} /* !lmt.min_sng */
if(lmt.max_sng){
lmt.max_val=strtod(lmt.max_sng,&sng_cnv_rcd);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.max_sng,"strtod",sng_cnv_rcd);
} /* !lmt.max_sng */
/* Re-base coordinates as necessary in multi-file operators (MFOs)
lmt.origin was calculated earlier in routine */
/*
if(rec_dmn_and_mfo){
if(lmt.min_sng) lmt.min_val-=lmt.origin;
if(lmt.max_sng) lmt.max_val-=lmt.origin;
}
*/
if(rec_dmn_and_mfo && fl_udu_sng && lmt.rbs_sng && strcmp(fl_udu_sng,lmt.rbs_sng)){
if(lmt.min_sng)
if(nco_cln_clc_dbl_var_dff(lmt.rbs_sng,fl_udu_sng,lmt.cln_typ,&lmt.min_val,(var_sct*)NULL) != NCO_NOERR)
nco_exit(EXIT_FAILURE);
if(lmt.max_sng)
if(nco_cln_clc_dbl_var_dff(lmt.rbs_sng,fl_udu_sng,lmt.cln_typ,&lmt.max_val,(var_sct*)NULL) != NCO_NOERR)
nco_exit(EXIT_FAILURE);
if(nco_dbg_lvl_get() > nco_dbg_std) fprintf(stdout,"%s: INFO nco_lmt rebasing min_val=%f max_val=%f\n",nco_prg_nm_get(),lmt.min_val,lmt.max_val);
} /* endif MFO */
} /* end UDUnits conversion */
/* Warn when min_val > max_val (i.e., wrapped coordinate) */
if(nco_dbg_lvl_get() > nco_dbg_std && lmt.min_val > lmt.max_val) (void)fprintf(stderr,"%s: INFO Interpreting hyperslab specifications as wrapped coordinates [%s <= %g] and [%s >= %g]\n",nco_prg_nm_get(),lmt.nm,lmt.max_val,lmt.nm,lmt.min_val);
/* Fail when... */
if(
/* Following condition added 20000508, changes behavior of single point
hyperslabs depending on whether hyperslab occurs in record dimension
during multi-file operator operation.
Altered behavior of single point hyperslabs so that single point
hyperslabs in the record coordinate (i.e., -d time,1.0,1.0) may be
treated differently than single point hyperslabs in other
coordinates. Multifile operators will skip files if single point
hyperslabs in record coordinate lay outside record coordinate
range of file. For non-record coordinates (and for all operators
besides ncra and ncrcat on record coordinates), single point
hyperslabs will choose the closest value rather than skip the file
(I believe). This should be verified. */
/* User specified single point, coordinate is not wrapped, and both extrema fall outside valid crd range */
(rec_dmn_and_mfo && (lmt.min_val == lmt.max_val) && ((lmt.min_val > dmn_max) || (lmt.max_val < dmn_min))) ||
/* User did not specify single point, coordinate is not wrapped, and either extrema falls outside valid crd range */
((lmt.min_val < lmt.max_val) && ((lmt.min_val > dmn_max) || (lmt.max_val < dmn_min))) ||
/* User did not specify single point, coordinate is wrapped, and both extrema fall outside valid crd range */
((lmt.min_val > lmt.max_val) && ((lmt.min_val > dmn_max) && (lmt.max_val < dmn_min))) ||
False){
/* Allow for possibility that current file is superfluous */
if(rec_dmn_and_mfo){
flg_no_data_ok=True;
goto no_data_ok;
}else{
(void)fprintf(stdout,"%s: ERROR User-specified coordinate value range %g <= %s <= %g does not fall within valid coordinate range %g <= %s <= %g\n",nco_prg_nm_get(),lmt.min_val,lmt.nm,lmt.max_val,dmn_min,lmt.nm,dmn_max);
nco_exit(EXIT_FAILURE);
} /* end else */
} /* end if */
/* Armed with target coordinate minima and maxima, we are ready to bracket user-specified range */
/* If min_sng or max_sng were omitted, use extrema */
if(lmt.min_sng == NULL) lmt.min_idx=min_idx;
if(lmt.max_sng == NULL) lmt.max_idx=max_idx;
/* Single slice requires finding the closest coordinate */
if(lmt.min_val == lmt.max_val){
double dst_new;
double dst_old;
lmt.min_idx=0L;
dst_old=fabs(lmt.min_val-dmn_val_dp[0]);
for(tmp_idx=1L;tmp_idx<dmn_sz;tmp_idx++){
if((dst_new=fabs(lmt.min_val-dmn_val_dp[tmp_idx])) < dst_old){
dst_old=dst_new;
lmt.min_idx=tmp_idx;
} /* end if */
} /* end loop over tmp_idx */
lmt.max_idx=lmt.min_idx;
}else{ /* min_val != max_val */
/* Bracket specified extrema:
Should no coordinate values match the given criteria, flag the index with -1L
We defined the valid syntax such that single half range with -1L is not an error
This causes "-d lon,100.0,-100.0" to select [-180.0] when lon=[-180.0,-90.0,0.0,90.0]
because one of the specified half-ranges is valid (there are coordinates < -100.0).
However, "-d lon,100.0,-200.0" should fail when lon=[-180.0,-90.0,0.0,90.0] because both
of the specified half-ranges are invalid (no coordinate is > 100.0 or < -200.0).
-1L flags are replaced with correct indices (0L or dmn_sz-1L) following search loop block.
Overwriting -1L flags with 0L or dmn_sz-1L later is more heuristic than setting them = 0L here,
since 0L is valid search result. */
if(monotonic_direction == increasing){
if(lmt.min_sng){
/* Find index of smallest coordinate greater than min_val */
tmp_idx=0L;
while((dmn_val_dp[tmp_idx] < lmt.min_val) && (tmp_idx < dmn_sz)) tmp_idx++;
if(tmp_idx != dmn_sz) lmt.min_idx=tmp_idx; else lmt.min_idx=-1L;
} /* end if */
if(lmt.max_sng){
/* Find index of largest coordinate less than max_val */
tmp_idx=dmn_sz-1L;
while((dmn_val_dp[tmp_idx] > lmt.max_val) && (tmp_idx > -1L)) tmp_idx--;
if(tmp_idx != -1L) lmt.max_idx=tmp_idx; else lmt.max_idx=-1L;
} /* end if */
/* 20110221: csz fix hyperslab bug TODO nco1007 triggered by
ncks -O -v lat -d lat,20.,20.001 ~/nco/data/in.nc ~/foo.nc
This returned all values but should have returned none
Algorithm was broken because, although valid min and max indices existed,
they contained the empty set.
Now when this happens, set flg_no_data_err block */
if( /* Points are not wrapped ... */
(lmt.min_val < lmt.max_val) &&
/* ... and valid indices were found for both bracketing points... */
(lmt.min_idx != -1L && lmt.max_idx != -1L) &&
/* ...and indices contain empty set, i.e., min_idx > max_idx for increasing data... */
lmt.min_idx > lmt.max_idx) flg_no_data_err=True;
/* end if monotonic_direction == increasing */
}else{ /* monotonic_direction == decreasing */
if(lmt.min_sng){
/* Find index of smallest coordinate greater than min_val */
tmp_idx=dmn_sz-1L;
while((dmn_val_dp[tmp_idx] < lmt.min_val) && (tmp_idx > -1L)) tmp_idx--;
if(tmp_idx != -1L) lmt.min_idx=tmp_idx; else lmt.min_idx=-1L;
} /* end if */
if(lmt.max_sng){
/* Find index of largest coordinate less than max_val */
tmp_idx=0L;
while((dmn_val_dp[tmp_idx] > lmt.max_val) && (tmp_idx < dmn_sz)) tmp_idx++;
if(tmp_idx != dmn_sz) lmt.max_idx=tmp_idx; else lmt.max_idx=-1L;
} /* end if */
if( /* Points are not wrapped ... */
(lmt.min_val > lmt.max_val) &&
/* ... and valid indices were found for both bracketing points... */
(lmt.min_idx != -1L && lmt.max_idx != -1L) &&
/* ...and indices contain empty set, i.e., min_idx < max_idx for decreasing data... */
lmt.min_idx < lmt.max_idx) flg_no_data_err=True;
} /* end else monotonic_direction == decreasing */
/* Case where both min_idx and max_idx = -1 was flagged as error above
Case of wrapped coordinate: Either, but not both, of min_idx or max_idx will be flagged with -1
See explanation above */
if(lmt.min_idx == -1L && (lmt.min_val > lmt.max_val)) lmt.min_idx=0L;
if(lmt.max_idx == -1L && (lmt.min_val > lmt.max_val)) lmt.max_idx=dmn_sz-1L;
} /* end if min_val != max_val */
/* User-specified ranges are now bracketed */
/* Convert indices of minima and maxima to srt and end indices */
if(monotonic_direction == increasing){
lmt.srt=lmt.min_idx;
lmt.end=lmt.max_idx;
}else{
lmt.srt=lmt.max_idx;
lmt.end=lmt.min_idx;
} /* end else */
/* Free space allocated for dimension */
dmn_val_dp=(double*)nco_free(dmn_val_dp);
if(rec_dmn_and_mfo){
/* No wrapping with multi-file operators */
if((monotonic_direction == increasing && lmt.min_val > lmt.max_val) ||
(monotonic_direction == decreasing && lmt.min_val < lmt.max_val)){
flg_no_data_ok=True;
goto no_data_ok;
} /* endif */
if(rec_usd_cml == 0L){
/* Skipped records remains zero until valid records are processed */
lmt.rec_skp_vld_prv=0L;
}else if(rec_usd_cml > 0L){
/* Otherwise, adjust starting index by records skipped in jumps across file boundaries */
lmt.srt+=lmt.srd-1L-lmt.rec_skp_vld_prv%lmt.srd;
if(lmt.srt > lmt.end){
/* Do not allow record dimension wrapping in MFOs */
flg_no_data_ok=True;
goto no_data_ok;
} /* endif */
} /* endif */
/* If we are here then there are valid records in current file */
} /* end if rec_dmn_and_mfo */
}else{ /* end if limit arguments were coordinate values */
/* Convert limit strings to zero-based indicial offsets */
/* Specifying stride alone, but not min or max, is legal, e.g., -d time,,,2
Thus is_usr_spc_lmt may be True, even though one or both of min_sng, max_sng is NULL
Furthermore, both min_sng and max_sng are artifically created by nco_lmt_sct_mk()
for record dimensions when the user does not explicitly specify limits.
In this case, min_sng_and max_sng are non-NULL though no limits were specified
In fact, min_sng and max_sng are set to the minimum and maximum string
values of the first file processed.
However, we can tell if these strings were artificially generated because
nco_lmt_sct_mk() sets the is_usr_spc_lmt flag to False in such cases.
Subsequent files may have different numbers of records, but nco_lmt_sct_mk()
is only called once.
Thus we must update min_idx and max_idx here for each file
This causes min_idx and max_idx to be out of sync with min_sng and max_sng,
which are only set in nco_lmt_sct_mk() for the first file.
In hindsight, artificially generating min_sng and max_sng may be bad idea */
/* Following logic is messy, but hard to simplify */
if(lmt.min_sng == NULL || !lmt.is_usr_spc_lmt){
/* No user-specified value available--generate minimal dimension index */
if(FORTRAN_IDX_CNV) lmt.min_idx=1L; else lmt.min_idx=0L;
}else{
/* Use user-specified limit when available */
lmt.min_idx=strtol(lmt.min_sng,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.min_sng,"strtol",sng_cnv_rcd);
} /* end if */
if(lmt.max_sng == NULL || !lmt.is_usr_spc_lmt){
/* No user-specified value available---generate maximal dimension index */
if(FORTRAN_IDX_CNV) lmt.max_idx=dmn_sz; else lmt.max_idx=dmn_sz-1L;
}else{
/* Use user-specified limit when available */
lmt.max_idx=strtol(lmt.max_sng,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(lmt.max_sng,"strtol",sng_cnv_rcd);
} /* end if */
/* Adjust indices if FORTRAN style input was specified */
if(FORTRAN_IDX_CNV){
/* 20120726: Die when Fortran index is zero */
if(lmt.min_idx == 0L || lmt.max_idx == 0L){
(void)fprintf(stdout,"%s: ERROR User-specified Fortran (1-based) index for dimension %s = 0.\n",nco_prg_nm_get(),lmt.nm);
msg_sng=strdup("Fortran indices must be >= 1");
NCO_SYNTAX_ERROR=True;
} /* endif illegal Fortran index */
/* 20120709: Adjust positive indices only */
if(lmt.min_idx > 0L) lmt.min_idx--;
if(lmt.max_idx > 0L) lmt.max_idx--;
} /* end if */
/* Negative integer as min or max element of hyperslab specification indicates offset from end
pharoahs--20120708 Negative integers produce domain error
20120709--20141001 Negative integer is elements away from last element, e.g., -1 is penultimate element
20141002--forever -1 is last element, e.g., -2 is penultimate element, -N is first element (Python convention) */
nco_bool flg_old_usg=False;
if(lmt.min_idx == 0L && lmt.min_sng)
if(lmt.min_sng[0] == '-')
flg_old_usg=True;
if(lmt.max_idx == 0L && lmt.max_sng)
if(lmt.max_sng[0] == '-')
flg_old_usg=True;
if(flg_old_usg) (void)fprintf(stdout,"%s: WARNING Only NCO 4.4.6 treats negative zero as the last element of a dimension. Beginning 20141002, NCO uses the Python convention where negative one is the last element of a dimension, and negative zero is the same as zero and so selects the first element of a dimension. Negative zero also causes this warning to be printed in case the 4.4.6 behavior was intended.\n",nco_prg_nm_get());
if(lmt.min_idx < 0L) lmt.min_idx+=dmn_sz;
if(lmt.max_idx < 0L) lmt.max_idx+=dmn_sz;
/* Exit if requested indices are invalid for all operators... */
if(lmt.min_idx < 0L){
msg_sng=strdup("Minimum index is too negative");
NCO_SYNTAX_ERROR=True;
}else if(lmt.max_idx < 0L){
msg_sng=strdup("Maximum index is too negative");
NCO_SYNTAX_ERROR=True;
}else if(lmt.ilv > 1L && lmt.ssc > lmt.srd){
(void)fprintf(stdout,"%s: ERROR User-specified subcycle exceeds stride for dimension %s: %li > %li\n",nco_prg_nm_get(),lmt.nm,lmt.ssc,lmt.srd);
msg_sng=strdup("Subcycle exceeds stride");
NCO_SYNTAX_ERROR=True;
}else if(lmt.ssc % lmt.ilv != 0L){
(void)fprintf(stdout,"%s: ERROR Interleave stride must evenly divide group size for dimension %s: %li %% %li = %li != 0\n",nco_prg_nm_get(),lmt.nm,lmt.ssc,lmt.ilv,lmt.ssc%lmt.ilv);
msg_sng=strdup("Interleave stride does not evenly divide sub-cycle length");
NCO_SYNTAX_ERROR=True;
}else if(!rec_dmn_and_mfo && lmt.min_idx >= dmn_sz){
msg_sng=strdup("Minimum index greater than size in non-MFO");
NCO_SYNTAX_ERROR=True;
(void)fprintf(stdout,"%s: ERROR User-specified dimension index range %li <= %s <= %li does not fall within valid dimension index range 0 <= %s <= %li\n",nco_prg_nm_get(),lmt.min_idx,lmt.nm,lmt.max_idx,lmt.nm,dmn_sz-1L);
} /* end if impossible indices */
if(NCO_SYNTAX_ERROR){
(void)fprintf(stdout,"%s: ERROR evaluating hyperslab specification for %s: %s\n%s: HINT Conform request to hyperslab documentation at http://nco.sf.net/nco.html#hyp\n",nco_prg_nm_get(),lmt.nm,msg_sng,nco_prg_nm_get());
msg_sng=(char *)nco_free(msg_sng);
nco_exit(EXIT_FAILURE);
} /* !NCO_SYNTAX_ERROR */
/* NB: Subcycle is officially supported only for ncra and ncrcat (record dimension only) */
if(lmt.ssc != 1L && !rec_dmn_and_mfo) (void)fprintf(stderr,"%s: WARNING Subcycle argument is only supported for the record dimension on ncra and ncrcat operations\n",nco_prg_nm_get());
/* Logic depends on whether this is record dimension in multi-file operator */
if(!rec_dmn_and_mfo || !lmt.is_usr_spc_lmt){
/* For non-record dimensions and for record dimensions where limit
was automatically generated (to include whole file), starting
and ending indices are simply minimum and maximum indices already
in structure */
lmt.srt=lmt.min_idx;
lmt.end=lmt.max_idx;
}else{
/* Initialize rec_skp_vld_prv to 0L on first call to nco_lmt_evl()
This is necessary due to intrinsic hysterisis of rec_skp_vld_prv
rec_skp_vld_prv is used only by multi-file operators
rec_skp_vld_prv counts records skipped at end of previous valid file
rec_usd_cml and rec_skp_ntl_spf are both zero only for first file */
if(rec_usd_cml == 0L && lmt.rec_skp_ntl_spf == 0L) lmt.rec_skp_vld_prv=0L;
/* For record dimensions with user-specified limits, allow possibility
that limits pertain to record dimension in a multi-file operator.
Then user-specified maximum index may exceed number of records in any one file
Thus lmt.srt does not necessarily equal lmt.min_idx and
lmt.end does not necessarily equal lmt.max_idx */
/* NB: Stride is officially supported for ncks (all dimensions) and for ncra and ncrcat (record dimension only) */
if(lmt.srd != 1L && nco_prg_id != ncks && !lmt.is_rec_dmn) (void)fprintf(stderr,"%s: WARNING Stride argument for non-record dimension is only supported by ncks, use at your own risk...\n",nco_prg_nm_get());
{ /* Block hides scope of local internal variables */
long srt_min_lcl; /* [idx] Minimum start index (in absolute index space, i.e., relative to first file) for current file */
long end_max_lcl; /* [idx] Maximum end index (in absolute index space, i.e., relative to first file) for current file */
srt_min_lcl=(lmt.is_usr_spc_min ? lmt.min_idx : lmt.rec_in_cml+0L);
end_max_lcl=(lmt.is_usr_spc_max ? lmt.max_idx : lmt.rec_in_cml+dmn_sz-1L);
/* Maximum allowed index in record dimension */
lmt.idx_end_max_abs=end_max_lcl;
/* Are we past file containing end_max_lcl yet? */
if(end_max_lcl < lmt.rec_in_cml){
/* This and all subsequent files are superfluous because all requested records have already been read
Optimize MFOs by checking "input complete" flag to jump out of file loop
Saves time because no other input files will be opened */
lmt.flg_input_complete=True;
flg_no_data_ok=True;
goto no_data_ok;
} /* endif past end_max_lcl */
/* Have we reached file containing srt_min_lcl yet? */
if(srt_min_lcl > lmt.rec_in_cml+dmn_sz-1L){
/* This and all previous files are superfluous because the starting record is in a subsequent file */
flg_no_data_ok=True;
goto no_data_ok;
} /* endif srt_min_lcl in future file */
/* Until records have been used, start index is srt_min_lcl adjusted for records contained in all previous files
Thereafter start index loses memory of/dependence on absolute start index, and only cares for how many records,
if any, were skipped since last valid record. This number, modulo stride, is new start index. */
if(rec_usd_cml == 0L) lmt.srt=srt_min_lcl-lmt.rec_in_cml; else lmt.srt=lmt.srd-1L-lmt.rec_skp_vld_prv%lmt.srd;
if(lmt.srt > dmn_sz-1L){
/* Perhaps data were read in previous file(s) yet next record is in future file due to long stride */
flg_no_data_ok=True;
goto no_data_ok;
} /* endif */
lmt.end=(end_max_lcl < lmt.rec_in_cml+dmn_sz) ? end_max_lcl-lmt.rec_in_cml : dmn_sz-1L;
} /* end block hides scope of local internal variables */
/* If we are here then there are valid records in current file */
} /* endif user-specified limits to record dimension */
} /* end else limit arguments are hyperslab indices */
/* NB: MFO record dimension never reaches this block if current file is superfluous
In that case code has already branched down to flg_data_ok or flg_data_err */
if(rec_dmn_and_mfo){
/* NB: This is---and must be---performed as integer arithmetic */
cnt_rmn_crr=1L+(lmt.end-lmt.srt)/lmt.srd;
/* This fixes "sloppy" specification of end index by user, i.e., ensures that end index coincides with a stride */
lmt.end=lmt.srt+(cnt_rmn_crr-1L)*lmt.srd;
/* Save current rec_skp_vld_prv for diagnostics (printed below) for this file */
rec_skp_vld_prv_dgn=lmt.rec_skp_vld_prv;
/* Next file must know how many records in this file come after (and thus will be skipped) last used record in this file */
lmt.rec_skp_vld_prv=dmn_sz-1L-lmt.end;
} /* !rec_dmn_and_mfo */
/* Compute cnt from srt, end, and srd
This is fine for multi-file record dimensions since those operators read-in one
record at a time and thus never actually use lmt.cnt for record dimension. */
if(lmt.srd == 1L){
if(lmt.srt <= lmt.end) lmt.cnt=lmt.end-lmt.srt+1L; else lmt.cnt=dmn_sz-lmt.srt+lmt.end+1L;
}else{
if(lmt.srt <= lmt.end) lmt.cnt=1L+(lmt.end-lmt.srt)/lmt.srd; else lmt.cnt=1L+((dmn_sz-lmt.srt)+lmt.end)/lmt.srd;
} /* end else */
/* NB: Degenerate cases of WRP && SRD exist for which dmn_cnt_2 == 0
This occurs when srd is large enough, or max_idx small enough,
such that no values are selected in the second read.
e.g., "-d lon,60,0,10" if sz(lon)=128 has dmn_cnt_2 == 0
Since netCDF library reports an error reading and writing cnt=0 dimensions, kludge is necessary
Syntax ensures that it is always the second read, not the first, which is obviated
Therefore we convert these degenerate cases into non-wrapped coordinates to be processed by single read
For these degenerate cases only, [srt,end] are not a permutation of [min_idx,max_idx] */
if(
(lmt.srd != 1L) && /* SRD */
(lmt.srt > lmt.end) && /* WRP */
(lmt.cnt == (1L+(dmn_sz-lmt.srt-1L)/lmt.srd)) && /* dmn_cnt_1 == cnt -> dmn_cnt_2 == 0 */
True){
long greatest_srd_multiplier_1st_hyp_slb; /* Greatest integer m such that srt+m*srd < dmn_sz */
long last_good_idx_1st_hyp_slb; /* C-index of last valid member of 1st hyperslab (= srt+m*srd) */
/* long left_over_idx_1st_hyp_slb;*/ /* # of elements from first hyperslab that count towards current stride */
long first_good_idx_2nd_hyp_slb; /* C-index of first valid member of 2nd hyperslab, if any */
/* NB: Perform these operations with integer arithmetic or else! */
/* Wrapped dimensions with stride may not start at idx 0 on second read */
greatest_srd_multiplier_1st_hyp_slb=(dmn_sz-lmt.srt-1L)/lmt.srd;
last_good_idx_1st_hyp_slb=lmt.srt+lmt.srd*greatest_srd_multiplier_1st_hyp_slb;
/* left_over_idx_1st_hyp_slb=dmn_sz-last_good_idx_1st_hyp_slb-1L;*/
first_good_idx_2nd_hyp_slb=(last_good_idx_1st_hyp_slb+lmt.srd)%dmn_sz;
/* Conditions causing dmn_cnt_2 == 0 */
if(first_good_idx_2nd_hyp_slb > lmt.end) lmt.end=last_good_idx_1st_hyp_slb;
} /* end if */
/* Cases where domain brackets no data, in error, have counts set to zero here
This kludge allows codepaths for both WRP and out-of-domain to flow without goto statements
Out-of-domain errors will soon exit with error, while WRP conditions will proceed */
if(flg_no_data_err) lmt.cnt=0L;
/* Exit when valid bracketed range contains no coordinates and this is not a superfluous file in an MFO */
if(lmt.cnt == 0){
if(lmt.lmt_typ == lmt_crd_val || lmt.lmt_typ == lmt_udu_sng){
(void)fprintf(stdout,"%s: ERROR %s reports domain %15.9e <= %s <= %15.9e brackets no coordinate values\n",nco_prg_nm_get(),fnc_nm,lmt.min_val,lmt.nm,lmt.max_val);
if(lmt.min_sng) (void)fprintf(stdout,"%s: INFO user-specified coordinate minimum: \"%s\"\n",nco_prg_nm_get(),lmt.min_sng);
if(lmt.max_sng) (void)fprintf(stdout,"%s: INFO user-specified coordinate maximum: \"%s\"\n",nco_prg_nm_get(),lmt.max_sng);
} /* !lmt_typ */
if(lmt.lmt_typ == lmt_dmn_idx) (void)fprintf(stdout,"%s: ERROR Indices bracket empty domain for %s\n",nco_prg_nm_get(),lmt.nm);
nco_exit(EXIT_FAILURE);
} /* !lmt.cnt */
/* Coordinate-valued limits that bracket no values in current file jump here with goto
Index-valued limits with no values in current file flow here naturally */
no_data_ok: /* end goto */
if(flg_no_data_ok){
/* File is superfluous (contributes no data) to specified hyperslab
Set output parameters to well-defined state
This state must not cause ncra or ncrcat to retrieve any data
ncra and ncrcat use loops for the record dimension, so this is
accomplished by setting loop control values (lmt_rec.srt > lmt_rec.end)
that cause record loop always to be skipped (never entered) */
lmt.srt=-1L;
lmt.end=lmt.srt-1L;
lmt.cnt=-1L;
/* Augment number of records skipped in initial superfluous files */
if(rec_usd_cml == 0L) lmt.rec_skp_ntl_spf+=dmn_sz;
/* Augment records skipped since last good one */
lmt.rec_skp_vld_prv+=dmn_sz;
/* Set variables to preserve utility of diagnostics at end of routine */
cnt_rmn_crr=rec_skp_vld_prv_dgn=0L;
} /* endif */
/* Accumulate count of records in all opened files, including this one
Increment here at end so this structure member includes records from current file
only at end of this routine, where it can only be used diagnostically
NB: Location of this augmentation is important! Moving it would have side-effects! */
lmt.rec_in_cml+=dmn_sz;
if(nco_dbg_lvl_get() >= nco_dbg_old){
(void)nco_prn_lmt(lmt,min_lmt_typ,FORTRAN_IDX_CNV,flg_no_data_ok,rec_usd_cml,monotonic_direction,rec_dmn_and_mfo,cnt_rmn_ttl,cnt_rmn_crr,rec_skp_vld_prv_dgn);
} /* end dbg */
if(lmt.srt > lmt.end && !flg_no_data_ok){
if(nco_prg_id != ncks) (void)fprintf(stderr,"WARNING: Possible instance of Schweitzer data hole requiring better diagnostics TODO #148\n");
if(nco_prg_id != ncks) (void)fprintf(stderr,"HINT: If operation fails, try multislabbing (http://nco.sf.net/nco.html#msa) wrapped dimension using ncks first, and then apply %s to the resulting file\n",nco_prg_nm_get());
} /* end dbg */
/* Place contents of working structure in location of returned structure */
*lmt_ptr=lmt;
fl_udu_sng=(char *)nco_free(fl_udu_sng);
} /* !nco_lmt_evl_dmn_crd() */
|
common_template_functions.h | /*
* common_template_functions.h
*
* Created on: 27/10/2019
* Author: fernando
*/
#ifndef COMMON_TEMPLATE_FUNCTIONS_H_
#define COMMON_TEMPLATE_FUNCTIONS_H_
#include <random>
#include <algorithm>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <cassert>
#include "device_functions.h"
#ifdef OMP
#include <omp.h>
#endif
#if (__CUDACC_VER_MAJOR__ <= 7)
#include "../../mxm/half.hpp"
using half = half_float::half;
#endif
#define CHAR_CAST(x) (reinterpret_cast<char*>(x))
#define GENERATOR_MAXABSVALUE_GEMM 10
#define GENERATOR_MINABSVALUE_GEMM -GENERATOR_MAXABSVALUE_GEMM
#define GENERATOR_MAXABSVALUE_TENSOR 10
#define GENERATOR_MINABSVALUE_TENSOR -GENERATOR_MAXABSVALUE_TENSOR
/* Stream a CUDA dim3 as its three components separated by single spaces
 * ("x y z"), matching the format expected by the logging helpers. */
static std::ostream& operator<<(std::ostream& os, const dim3 d) {
	os << d.x;
	os << " " << d.y;
	os << " " << d.z;
	return os;
}
/**
 * Read array.size() elements of raw binary data from the file at `path`
 * into `array`. The vector must already be sized to the expected count.
 *
 * Returns true only when the file opened AND all requested bytes were
 * extracted. The previous version returned true even when the file was
 * shorter than the vector, silently leaving the tail uninitialized.
 */
template<typename T>
bool read_from_file(std::string& path, std::vector<T>& array) {
	std::ifstream input(path, std::ios::binary);
	if (!input.good()) {
		return false;
	}
	const std::streamsize expected =
			std::streamsize(array.size() * sizeof(T));
	input.read(reinterpret_cast<char*>(array.data()), expected);
	// gcount() reports how many bytes the last unformatted read extracted;
	// a short read (truncated/corrupt file) is now reported as failure.
	const bool ok = (input.gcount() == expected);
	input.close();
	return ok;
}
/**
 * Write the raw bytes of `array` to the file at `path` (binary mode,
 * truncating any existing file).
 *
 * Returns true only when the file opened AND the write (including the
 * flush performed by close()) succeeded. The previous version returned
 * true without checking whether the write itself failed (e.g. disk full).
 */
template<typename T>
bool write_to_file(std::string& path, std::vector<T>& array) {
	std::ofstream output(path, std::ios::binary);
	if (!output.good()) {
		return false;
	}
	output.write(reinterpret_cast<const char*>(array.data()),
			std::streamsize(array.size() * sizeof(T)));
	// close() flushes; any write/flush error leaves failbit/badbit set.
	output.close();
	return output.good();
}
/* Return true when the file at `path` can be opened for reading
 * (the temporary stream is closed automatically by its destructor). */
static bool exists(std::string& path) {
	return std::ifstream(path).good();
}
/* Persist the three GEMM input matrices (A, B in half_t precision, C in
 * real_t precision) to their respective files; throws via throw_line on
 * the first failure. */
template<typename half_t, typename real_t>
void write_abc_files(
		std::string& a_file_path, std::vector<half_t>& a_vector,
		std::string& b_file_path, std::vector<half_t>& b_vector,
		std::string& c_file_path, std::vector<real_t>& c_vector
		) {
	if (!write_to_file(a_file_path, a_vector))
		throw_line(a_file_path + " could not be written\n");
	if (!write_to_file(b_file_path, b_vector))
		throw_line(b_file_path + " could not be written\n");
	if (!write_to_file(c_file_path, c_vector))
		throw_line(c_file_path + " could not be written\n");
}
/* Load the three GEMM input matrices from disk into pre-sized vectors;
 * throws via throw_line on the first failure. */
template<typename half_t, typename real_t>
void read_abc_files(
		std::string& a_file_path, std::vector<half_t>& a_vector,
		std::string& b_file_path, std::vector<half_t>& b_vector,
		std::string& c_file_path, std::vector<real_t>& c_vector
		) {
	if (!read_from_file(a_file_path, a_vector))
		throw_line(a_file_path + " could not be read\n");
	if (!read_from_file(b_file_path, b_vector))
		throw_line(b_file_path + " could not be read\n");
	if (!read_from_file(c_file_path, c_vector))
		throw_line(c_file_path + " could not be read\n");
}
/* Persist the gold (reference) output vector; throws on failure. */
template<typename real_t>
void write_gold(std::string& d_file_path, std::vector<real_t>& d_vector) {
	if (!write_to_file(d_file_path, d_vector))
		throw_line(d_file_path + " could not be written\n");
}
/* Load the gold (reference) output vector from disk; throws on failure. */
template<typename real_t>
void read_gold(std::string& d_file_path, std::vector<real_t>& d_vector) {
	if (!read_from_file(d_file_path, d_vector))
		throw_line(d_file_path + " could not be read\n");
}
/**
 * Size the three matrix_size x matrix_size input matrices and either
 * fill them with uniformly distributed random values and persist them
 * (read_abc == false), or load previously persisted matrices from disk.
 *
 * The value range comes from the GEMM or TENSOR generator limits
 * depending on the TENSOR_INPUT template flag.
 *
 * Fix: the original shared one std::mt19937 across all iterations of an
 * OpenMP parallel-for; standard random engines are not thread-safe, so
 * that was a data race. Each thread now owns its own engine, seeded
 * from a common random_device draw plus its thread id.
 */
template<typename half_t, typename real_t, const bool TENSOR_INPUT = false>
void get_input_matrices(size_t matrix_size, std::vector<half_t>& a_vector,
		std::vector<half_t>& b_vector, std::vector<real_t>& c_vector,
		std::string& a_file_path, std::string& b_file_path,
		std::string& c_file_path, const bool read_abc = false) {
	const size_t element_count = matrix_size * matrix_size;
	double min_val = GENERATOR_MINABSVALUE_GEMM;
	double max_val = GENERATOR_MAXABSVALUE_GEMM;
	if (TENSOR_INPUT) {
		min_val = GENERATOR_MINABSVALUE_TENSOR;
		max_val = GENERATOR_MAXABSVALUE_TENSOR;
	}
	a_vector.resize(element_count);
	b_vector.resize(element_count);
	c_vector.resize(element_count);
	if (read_abc == false) {
		std::random_device rd; // non-deterministic seed source
		const auto base_seed = rd();
#pragma omp parallel
		{
			unsigned thread_id = 0;
#ifdef OMP
			thread_id = (unsigned)omp_get_thread_num();
#endif
			// Per-thread engine: std::mt19937 must not be shared across threads.
			std::mt19937 gen(base_seed + thread_id);
			std::uniform_real_distribution<double> dis(min_val, max_val);
#pragma omp for
			for (size_t i = 0; i < element_count; i++) {
				a_vector[i] = half_t(dis(gen));
				b_vector[i] = half_t(dis(gen));
				c_vector[i] = real_t(dis(gen));
			}
		}
		write_abc_files(
				a_file_path, a_vector,
				b_file_path, b_vector,
				c_file_path, c_vector);
	} else {
		read_abc_files(
				a_file_path, a_vector,
				b_file_path, b_vector,
				c_file_path, c_vector
				);
	}
}
// Read-and-reset the device-side DMR error counter.
// Copies the `errors` device symbol (declared in device_functions.h) to the
// host, then writes zero back to the device so the next kernel launch starts
// with a clean counter. Returns the value observed before the reset.
// NOTE(review): not atomic with respect to concurrently running kernels —
// assumes it is called between kernel launches.
static unsigned long long dmr_errors() {
	unsigned long long ret = 0;
	rad::checkFrameworkErrors(
			cudaMemcpyFromSymbol(&ret, errors, sizeof(unsigned long long), 0,
					cudaMemcpyDeviceToHost));
	// Reset the device counter to zero.
	unsigned long long tmp = 0;
	rad::checkFrameworkErrors(
			cudaMemcpyToSymbol(errors, &tmp, sizeof(unsigned long long), 0,
					cudaMemcpyHostToDevice));
	return ret;
}
// Exact-equality comparison for two values of the same type.
// `threshold` is accepted only for signature compatibility with the
// mixed-precision overload below; it is unused here.
template<typename real_t>
bool equals(real_t& lhs, real_t& rhs, const uint32_t threshold = 0) {
	return lhs == rhs;
}
// Exact-equality overload for half precision: both operands are widened to
// float before comparing. `threshold` is unused (signature compatibility).
static bool equals(half& lhs, half& rhs, const uint32_t threshold = 0) {
	return float(lhs) == float(rhs);
}
// Stream a half value by promoting it to float first, since the half type
// has no ostream inserter of its own.
static std::ostream& operator<<(std::ostream& os, half &rhs) {
	os << float(rhs);
	return os;
}
// fabs overload for half: widens to float and defers to the math-library
// fabs. (Overload resolution picks ::fabs(double) for the float argument —
// a standard conversion beats the user-defined float->half conversion — so
// this does not recurse into itself.)
static float fabs(half h) {
	return fabs(float(h));
}
// Bit-level tolerance comparison between a float result and a double
// reference: the double is narrowed to float, both values are reinterpreted
// as uint32_t via memcpy (avoids strict-aliasing UB), and the absolute
// difference of the two bit patterns (SUB_ABS, from device_functions.h) is
// compared against `threshold` — effectively a ULP-distance check.
static bool equals(float& lhs, double& rhs, const uint32_t threshold) {
	assert(sizeof(float) == sizeof(uint32_t));
	float rhs_float = float(rhs);
	uint32_t lhs_data;
	uint32_t rhs_data;
	memcpy(&lhs_data, &lhs, sizeof(uint32_t));
	memcpy(&rhs_data, &rhs_float, sizeof(uint32_t));
	auto diff = SUB_ABS(lhs_data, rhs_data);
	return (diff <= threshold);
}
/* Reference check for C = alpha*A*B + beta*C: recomputes the product on the
 * CPU into the by-value copy `c` (the caller's vector is untouched) and
 * prints every element whose absolute difference from the GPU result
 * exceeds 1e-4. Matrices are n x n, row-major. */
template<class t>
void debug_mxm(std::vector<t>& a, std::vector<t>& b, std::vector<t> c,
		std::vector<t>& c_gpu, float alpha, float beta, int n) {
	for (auto row = 0; row < n; ++row) {
		for (auto col = 0; col < n; ++col) {
			t acc = 0;
			for (auto k = 0; k < n; ++k)
				acc += a[row * n + k] * b[k * n + col];
			c[row * n + col] = alpha * acc + c[row * n + col] * beta;
		}
	}
	for (unsigned idx = 0; idx < c_gpu.size(); idx++) {
		if (fabs(c[idx] - c_gpu[idx]) > 1.0e-4) {
			std::cout << "i: " << idx << " CPU: " << c[idx] << " GPU: "
					<< c_gpu[idx] << std::endl;
		}
	}
}
/* Compare the GPU results against the gold reference and, when DMR is
 * enabled, against the lower-precision duplicate.
 *
 * For every element: an error is logged when the full-precision result
 * differs from gold; `memory_errors` additionally counts the suspicious
 * case where the output is wrong yet both precisions agree (DMR failed to
 * detect the corruption). Also drains the device-side DMR error counter
 * via dmr_errors().
 *
 * Returns {device-side DMR error count, host-detected mismatch count}.
 * Side effects: logs through `parameter`, prints up to 10 details when
 * verbose, prints "#" when any host error was found.
 * NOTE(review): the OMP critical region serializes all logging, so
 * host_errors/memory_errors updates are safe despite the parallel loop. */
template<class half_t, class real_t>
std::pair<int, int> check_output_errors_dmr(std::vector<real_t>& gold,
		std::vector<real_t>& real_vector, std::vector<half_t>& half_vector,
		Parameters& parameter, const uint32_t threshold, const bool dmr) {
	uint32_t host_errors = 0;
	uint32_t memory_errors = 0;
#ifdef OMP
#pragma omp parallel for shared(host_errors, memory_errors)
#endif
	for (size_t i = 0; i < gold.size(); i++) {
		auto gold_value = gold[i];
		real_t full_precision = real_vector[i];
		// Without DMR the "half" copy is just the full-precision value.
		half_t half_precision = (dmr == true) ? half_vector[i] : real_vector[i];
		//Check if DMR is OK
		bool dmr_equals = equals(half_precision, full_precision, threshold);
		//Is output corrupted
		bool is_output_diff = !equals(gold_value, full_precision);
		if (!equals(gold_value, full_precision)) {
#ifdef OMP
#pragma omp critical
		{
#endif
			std::stringstream error_detail("");
			error_detail << std::setprecision(20) << std::scientific;
			// p: [row, col] derived from the flat index and matrix width.
			error_detail << "p: [" << int(floor(i / parameter.size_matrices)) << ", "
					<< i % parameter.size_matrices << "], r: ";
			error_detail << full_precision;
			error_detail << ", e: " << gold_value << " smaller_precision: "
					<< half_precision;
			if (parameter.verbose && (host_errors < 10)) {
				std::cout << error_detail.str() << std::endl;
				std::cout << is_output_diff << " " << !dmr_equals << std::endl;
			}
			parameter.log_error(error_detail.str());
			host_errors++;
			// Output is wrong but both precisions agree: DMR missed it.
			if (is_output_diff && dmr_equals && dmr) {
				memory_errors++;
			}
#ifdef OMP
		}
#endif
		}
	}
	// Drain (and reset) the device-side DMR error counter.
	auto dmr_err = dmr_errors();
	if (dmr_err != 0) {
		std::string error_detail;
		error_detail = "detected_dmr_errors: " + std::to_string(dmr_err);
		parameter.log_info(error_detail);
	}
	if (memory_errors != 0) {
		parameter.log_info("dmr1_equals_dmr2_detected");
	}
	parameter.update_error_count(host_errors);
	if (host_errors != 0)
		std::cout << "#";
	return {dmr_err, host_errors};
}
#endif /* COMMON_TEMPLATE_FUNCTIONS_H_ */
|
opencl_encfs_fmt_plug.c | /*
* Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format.
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_encfs;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_encfs);
#else
#include <stdint.h>
#include <string.h>
#include <openssl/opensslv.h>
#include <openssl/crypto.h>
#include <openssl/ssl.h>
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/engine.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "common-opencl.h"
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "encfs_common.h"
#include "options.h"
#include "misc.h"
#define OUTLEN (32 + 16)
#include "opencl_pbkdf2_hmac_sha1.h"
#define FORMAT_LABEL "encfs-opencl"
#define FORMAT_NAME "EncFS"
#define OCL_ALGORITHM_NAME "PBKDF2-SHA1 OpenCL"
#define CPU_ALGORITHM_NAME " AES/Blowfish"
#define ALGORITHM_NAME OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN MEM_ALIGN_WORD
/* This handles all widths */
#define GETPOS(i, index) (((index) % ocl_v_width) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width)
static int *cracked;
static int any_cracked;
static const int KEY_CHECKSUM_BYTES = 4;
static encfs_common_custom_salt *cur_salt;
static struct fmt_tests tests[] = {
{"$encfs$192*181474*0*20*f1c413d9a20f7fdbc068c5a41524137a6e3fb231*44*9c0d4e2b990fac0fd78d62c3d2661272efa7d6c1744ee836a702a11525958f5f557b7a973aaad2fd14387b4f", "openwall"},
{"$encfs$128*181317*0*20*e9a6d328b4c75293d07b093e8ec9846d04e22798*36*b9e83adb462ac8904695a60de2f3e6d57018ccac2227251d3f8fc6a8dd0cd7178ce7dc3f", "Jupiter"},
{"$encfs$256*714949*0*20*472a967d35760775baca6aefd1278f026c0e520b*52*ac3b7ee4f774b4db17336058186ab78d209504f8a58a4272b5ebb25e868a50eaf73bcbc5e3ffd50846071c882feebf87b5a231b6", "Valient Gough"},
{"$encfs$256*120918*0*20*e6eb9a85ee1c348bc2b507b07680f4f220caa763*52*9f75473ade3887bca7a7bb113fbc518ffffba631326a19c1e7823b4564ae5c0d1e4c7e4aec66d16924fa4c341cd52903cc75eec4", "Alo3San1t@nats"},
{NULL}
};
static size_t key_buf_size;
static unsigned int *inbuffer;
static pbkdf2_out *output;
static pbkdf2_salt currentsalt;
static cl_mem mem_in, mem_out, mem_salt, mem_state;
static int new_keys;
static struct fmt_main *self;
static cl_kernel pbkdf2_init, pbkdf2_loop, pbkdf2_final;
#define cracked_size (sizeof(*cracked) * global_work_size * ocl_v_width)
/*
* HASH_LOOPS is ideally made by factors of (iteration count - 1) and should
* be chosen for a kernel duration of not more than 200 ms
*/
#define HASH_LOOPS (3 * 251)
#define ITERATIONS 181474 /* Just for auto tune */
#define LOOP_COUNT (((currentsalt.iterations - 1 + HASH_LOOPS - 1)) / HASH_LOOPS)
#define STEP 0
#define SEED 128
static const char * warn[] = {
"P xfer: " , ", init: " , ", loop: " , ", final: ", ", res xfer: "
};
static int split_events[] = { 2, -1, -1 };
//This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* ------- Helper functions ------- */
/* Upper bound on the OpenCL local work size: the minimum over the three
 * PBKDF2 kernels of what the autotuner reports for each kernel. */
static size_t get_task_max_work_group_size()
{
	size_t init_max = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_init);
	size_t loop_max = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_loop);
	size_t final_max = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_final);
	size_t bound = init_max;
	if (loop_max < bound)
		bound = loop_max;
	if (final_max < bound)
		bound = final_max;
	return bound;
}
#if 0
struct fmt_main *me;
#endif
/* Allocate host buffers and OpenCL device buffers for `gws` work items
 * (scaled by the vector width) and bind them as kernel arguments for the
 * three PBKDF2 kernels. Paired with release_clobj(). */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	/* gws is in vector units; convert to scalar keys. */
	gws *= ocl_v_width;
	key_buf_size = PLAINTEXT_LENGTH * gws;
	/// Allocate memory
	inbuffer = mem_calloc(1, key_buf_size);
	output = mem_alloc(sizeof(pbkdf2_out) * gws);
	cracked = mem_calloc(1, cracked_size);
	/* Device buffers: candidate keys in, salt, per-key PBKDF2 state, results out. */
	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, key_buf_size, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem in");
	mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(pbkdf2_salt), NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem out");
	mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(pbkdf2_state) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem_state");
	/* Bind buffers to kernel arguments: init consumes keys+salt and seeds
	 * state; loop iterates on state; final reads salt+state and emits output. */
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_loop, 0, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 0, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
}
/*
 * Free everything create_clobj() allocated.  `cracked` doubles as the
 * "buffers exist" flag, so this is safe to call more than once.
 */
static void release_clobj(void)
{
	if (!cracked)
		return;

	HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
	HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem setting");
	HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state");
	HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
	MEM_FREE(inbuffer);
	MEM_FREE(output);
	MEM_FREE(cracked);
}
/*
 * Format teardown: release buffers, kernels and the program, but only
 * if autotuning actually built them.
 */
static void done(void)
{
	if (!autotuned)
		return;

	release_clobj();
	HANDLE_CLERROR(clReleaseKernel(pbkdf2_init), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(pbkdf2_loop), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(pbkdf2_final), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
	autotuned--;
}
/*
 * One-time format init: pick the OpenCL vector width for this device and,
 * when running vectorized, patch the reported algorithm name to show it.
 */
static void init(struct fmt_main *_self)
{
/* static: self->params.algorithm_name keeps pointing here after we return */
static char valgo[sizeof(ALGORITHM_NAME) + 8] = "";
self = _self;
opencl_prepare_dev(gpu_id);
/* VLIW5 does better with just 2x vectors due to GPR pressure */
if (!options.v_width && amd_vliw5(device_info[gpu_id]))
ocl_v_width = 2;
else
ocl_v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int));
if (ocl_v_width > 1) {
/* Run vectorized kernel */
snprintf(valgo, sizeof(valgo),
OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, ocl_v_width);
self->params.algorithm_name = valgo;
}
}
/*
 * Build the PBKDF2-HMAC-SHA1 kernels (first call only) and run the shared
 * autotuner to choose global/local work sizes.
 */
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
/* Compile-time parameters baked into the kernel build */
snprintf(build_opts, sizeof(build_opts),
"-DHASH_LOOPS=%u -DOUTLEN=%u "
"-DPLAINTEXT_LENGTH=%u -DV_WIDTH=%u",
HASH_LOOPS, OUTLEN, PLAINTEXT_LENGTH, ocl_v_width);
opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_kernel.cl", gpu_id, build_opts);
pbkdf2_init = clCreateKernel(program[gpu_id], "pbkdf2_init", &ret_code);
HANDLE_CLERROR(ret_code, "Error creating kernel");
/* The loop kernel is the one the autotuner times (crypt_kernel) */
crypt_kernel = pbkdf2_loop = clCreateKernel(program[gpu_id], "pbkdf2_loop", &ret_code);
HANDLE_CLERROR(ret_code, "Error creating kernel");
pbkdf2_final = clCreateKernel(program[gpu_id], "pbkdf2_final", &ret_code);
HANDLE_CLERROR(ret_code, "Error creating kernel");
//Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 2*HASH_LOOPS, split_events,
warn, 2, self, create_clobj,
release_clobj,
ocl_v_width * sizeof(pbkdf2_state), 0, db);
//Auto tune execution from shared/included code.
autotune_run(self, 2 * (ITERATIONS - 1) + 4, 0,
(cpu(device_info[gpu_id]) ?
1000000000 : 10000000000ULL));
}
}
/*
 * Install the current salt: copy the EncFS salt parameters into the
 * GPU-side pbkdf2_salt struct and enqueue its upload.
 *
 * BUG FIX: the buffer argument had been corrupted to the mojibake
 * `¤tsalt` (an HTML `&curren;` entity swallowed the start of
 * `&currentsalt`), which does not compile; restored `&currentsalt`.
 */
static void set_salt(void *salt)
{
	cur_salt = (encfs_common_custom_salt*)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->saltLen);
	currentsalt.length = cur_salt->saltLen;
	currentsalt.iterations = cur_salt->iterations;
	/* key material needed = cipher key + IV */
	currentsalt.outlen = cur_salt->keySize + cur_salt->ivLength;
	/* Non-blocking write; later enqueued work on this queue orders after it */
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0,
		sizeof(pbkdf2_salt), &currentsalt, 0, NULL, NULL),
		"Copy salt to gpu");
}
/* Wipe the whole candidate-key buffer so stale keys never reach the GPU. */
static void clear_keys(void)
{
	memset(inbuffer, 0, key_buf_size);
}
/*
 * Store one candidate password, scattering its bytes into the
 * interleaved GPU key layout (GETPOS), and mark the buffer dirty.
 */
static void set_key(char *key, int index)
{
	char *keys = (char*)inbuffer;
	int pos;

	for (pos = 0; key[pos]; pos++)
		keys[GETPOS(pos, index)] = key[pos];
	new_keys = 1;
}
/*
 * Reassemble candidate `index` from the interleaved GPU layout into a
 * NUL-terminated C string (static buffer, overwritten on every call).
 */
static char* get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	int pos;

	for (pos = 0; pos < PLAINTEXT_LENGTH; pos++) {
		out[pos] = ((char*)inbuffer)[GETPOS(pos, index)];
		if (!out[pos])
			return out;
	}
	out[PLAINTEXT_LENGTH] = 0;
	return out;
}
/*
 * Run PBKDF2 on the GPU for all queued candidates, then verify each
 * derived key on the CPU against the EncFS volume-key checksum.
 * Returns the candidate count; results are recorded in cracked[].
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int i, j, index;
size_t scalar_gws;
size_t *lws = local_work_size ? &local_work_size : NULL;
global_work_size = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size);
scalar_gws = global_work_size * ocl_v_width;
/* Reset per-batch verdicts from the previous crypt_all() run */
if (any_cracked) {
memset(cracked, 0, cracked_size);
any_cracked = 0;
}
/// Copy data to gpu
/* Only upload keys when they changed (or when the autotuner is timing) */
if (ocl_autotune_running || new_keys) {
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, key_buf_size, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu");
new_keys = 0;
}
/// Run kernels
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_init, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run initial kernel");
/* Outer loop: one 20-byte SHA-1 output block per pass; inner loop splits
 * the iteration count into HASH_LOOPS-sized kernel launches so a single
 * launch never runs long enough to trip watchdogs. */
for (j = 0; j < (ocl_autotune_running ? 1 : (currentsalt.outlen + 19) / 20); j++) {
for (i = 0; i < (ocl_autotune_running ? 1 : LOOP_COUNT); i++) {
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[2]), "Run loop kernel");
BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
opencl_process_event();
}
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[3]), "Run intermediate kernel");
}
/// Read the result back
/* Blocking read: all derived keys are in output[] afterwards */
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, sizeof(pbkdf2_out) * scalar_gws, output, 0, NULL, multi_profilingEvent[4]), "Copy result back");
if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
int i;
unsigned char master[MAX_KEYLENGTH + MAX_IVLENGTH];
unsigned char tmpBuf[cur_salt->dataLen];
unsigned int checksum = 0;
unsigned int checksum2 = 0;
memcpy(master, output[index].dk, cur_salt->keySize + cur_salt->ivLength);
// First N bytes are checksum bytes.
for (i = 0; i < KEY_CHECKSUM_BYTES; ++i)
checksum = (checksum << 8) | (unsigned int)cur_salt->data[i];
memcpy(tmpBuf, cur_salt->data + KEY_CHECKSUM_BYTES, cur_salt->keySize + cur_salt->ivLength);
/* Decode the stored volume key with the derived key, then recompute
 * its MAC; a match means this candidate password is correct. */
encfs_common_streamDecode(cur_salt, tmpBuf, cur_salt->keySize + cur_salt->ivLength ,checksum, master);
checksum2 = encfs_common_MAC_32(cur_salt, tmpBuf, cur_salt->keySize + cur_salt->ivLength, master);
if (checksum2 == checksum)
{
cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
}
return count;
}
/* All verification already happened in crypt_all(); report any hit. */
static int cmp_all(void *binary, int count)
{
	return any_cracked ? 1 : 0;
}
/* Per-candidate verdict as recorded by crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index] != 0;
}
/* Nothing further to check: crypt_all() did the full MAC verification. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/*
 * Format descriptor registered with the John the Ripper core.
 * First sub-struct: static parameters; second: method table.
 */
struct fmt_main fmt_opencl_encfs = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
/* tunable-cost names reported to the user */
{
"iteration count",
},
{ FORMAT_TAG },
tests
}, {
init,
done,
reset,
fmt_default_prepare,
encfs_common_valid,
fmt_default_split,
fmt_default_binary,
encfs_common_get_salt,
/* tunable-cost extractors, parallel to the names above */
{
encfs_common_iteration_count,
},
fmt_default_source,
/* no binary hashing: results are compared via cracked[] flags */
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
ma_open_cl_info.c | #define N "\n"
const char* COPYRIGHT =
/*##################################################################*/
/*#*/ /*#*/
/*#*/ "Ma_Sys.ma OpenCL info and testing program 1.0.0, " /*#*/
/*#*/ "Copyright (c) 2016 Ma_Sys.ma." /*#*/ N
/*#*/ "For further info send an e-mail to Ma_Sys.ma@web.de." /*#*/ N
/*#*/ /*#*/
/*##################################################################*/ ;
const char* GPL[] = {
"This program is free software: you can redistribute it and/or modify" N
"it under the terms of the GNU General Public License as published by" N
"the Free Software Foundation, either version 3 of the License, or" N
"(at your option) any later version." N
N
"This program is distributed in the hope that it will be useful," N
"but WITHOUT ANY WARRANTY; without even the implied warranty of" N
"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the" N
"GNU General Public License for more details." ,
N
"You should have received a copy of the GNU General Public License" N
"along with this program. If not, see <http://www.gnu.org/licenses/>." N
};
/*
* Compilation
* $ gcc -fopenmp -lOpenCL -std=c89 -Wall -o ma_open_cl_info ma_open_cl_info.c
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
/*
* http://dhruba.name/2012/08/14/opencl-cookbook-listing-all-devices-and-
* their-critical-attributes/
* https://developer.apple.com/library/mac/samplecode/
* OpenCL_Hello_World_Example/Listings/hello_c.html
*/
#ifdef __APPLE__
# include <OpenCL/opencl.h>
#else
# include <CL/cl.h>
#endif
#define MULTIPLY_REPEAT 64 /* need to be MULTIPLY_REPEAT = 2^X, X in N */
#define EPSILON 0.00000001f
#define RAND_UPPER_BOUND 0.09f
#define DEFAULT_PROBLEM_SIZE 2048lu
#define INFOBUFSIZ 512
struct platform_info {
char require_gpu;
/** OpenGL platform */
cl_uint num_platforms;
cl_platform_id* platforms;
cl_uint* num_devices;
cl_device_id** devices;
/** Tests */
char skip_cpu;
unsigned long problem_size;
/* unidimensional notation for 2D matrices (more Open-CL like) */
float* example_a;
float* example_b;
float* result;
};
static void display_help(char* appname);
static int run(unsigned long problem_size, char skip_cpu, char require_gpu);
static void ma_opencl_error(cl_int id, int line);
static char* get_error_string(cl_int code);
static void for_each_platform(struct platform_info* p,
void (*func)(struct platform_info*, int));
static void get_and_print_platform_info(struct platform_info* p, int pid);
static void get_and_print_device_info(int j, cl_device_id d);
static void print_info_str(cl_device_id d, char* buf, int property,
char* property_display);
static void print_info_siz(cl_device_id d, int property,
char* property_display);
static void print_info_int(cl_device_id d, int property,
char* property_display);
static void initialize_tests(struct platform_info* p);
static void matmul_sub(struct platform_info* p, long unsigned i,
float* ca, float* cb);
static float my_randf();
static void delta_t(time_t t0, time_t t1);
static void calculate_on_platform(struct platform_info* p, int pid);
static void calculate_on_device(struct platform_info* p, int did,
cl_device_id d);
static void report_program_build_failure(cl_program prg, cl_device_id d);
static void await_event(cl_event* event);
static void compare_check_equality(unsigned long n, float* expected,
float* got);
static void free_tests(struct platform_info* p);
/*
TODO PROBLEM: DOES NOT WORK WITH LARGE MATRICES BECAUSE THE KERNEL'S
EXECUTION TIME IS SO TIGHTLY LIMITED IT IS EASILY EXCEEDED IF THE INNER
LOOP GOES OVER MORE THAN ABOUT 100 MiB...
*/
/*
 * OpenCL kernel source: naive dense matrix multiply, one work item per
 * output element (2D NDRange = problem_size x problem_size).
 * NOTE(review): identifier misspells "KERNEL"; left as-is because the name
 * is referenced elsewhere in this file.
 */
const char* MATRIX_MULTIPLICATION_KERENEL_SRC =
"__kernel void matmul(unsigned long problem_size," \
" __global const float* a, __global const float* b, " \
" __global float* result)" \
"{" \
" unsigned long j = get_global_id(0); /* get_g..._size(idx) */" \
" unsigned long i = get_global_id(1);" \
" unsigned long k;" \
" float sum = 0.0f;" \
" for(k = 0; k < problem_size; k++)" \
" sum += a[i * problem_size + k] * " \
" b[k * problem_size + j];" \
" result[i * problem_size + j] = sum;" \
"}";
/*
 * Entry point: print banner, parse single-letter options (-h, -p N, -n, -g),
 * then run the info dump and benchmark.
 *
 * BUG FIX: `case 'g'` was missing its `break` and fell through into
 * `case 0`, printing a spurious "Ignored single-letter option" warning
 * whenever -g was given.
 */
int main(int argc, char** argv)
{
	int ca;
	unsigned long problem_size = DEFAULT_PROBLEM_SIZE;
	char skip_cpu = 0;
	char require_gpu = 0;
	puts(COPYRIGHT);
	puts(GPL[0]);
	puts(GPL[1]);
	for(ca = 1; ca < argc; ca++) {
		/* switch on the character after the leading '-' */
		switch(argv[ca][1]) {
		case 'h':
		case '-':
			display_help(argv[0]);
			return EXIT_SUCCESS;
		case 'p':
			if(ca >= argc - 1) {
				/* -p requires a value argument */
				display_help(argv[0]);
				return EXIT_FAILURE;
			} else {
				problem_size = atol(argv[++ca]);
				printf("Info: Changed problem size to %lu\n",
					problem_size);
			}
			break;
		case 'n':
			printf("Info: Disabled CPU result calculation\n");
			skip_cpu = 1;
			break;
		case 'g':
			printf("Info: OpenCL limited to GPU\n");
			require_gpu = 1;
			break;
		case 0:
			/* argument was a single character, e.g. "x" */
			printf("Warning: Ignored single-letter option: %c\n",
				argv[ca][0]);
			break;
		default:
			printf("Warning: Ignored unknown option: %c\n",
				*argv[ca]);
		}
	}
	return run(problem_size, skip_cpu, require_gpu);
}
/*
 * Print usage and an explanation of the benchmark methodology.
 * `appname` is argv[0], interpolated into the USAGE line.
 */
static void display_help(char* appname)
{
printf(
"USAGE %s [-h|--help] [-n] [-g] [-p <N>]\n\n"
"-h Displays this help.\n"
"-n Disables result verification (no CPU precalculation)\n"
"-g Requires OpenCL to be run on GPUs only. "
"(Does not imply -n)\n"
"-p Configure problem size to be N, default %lu\n",
appname, DEFAULT_PROBLEM_SIZE
);
puts(
"\nInformation on how this works\n\n"
"This program multiplies two random float/single-matrices A and B.\n"
"A and B are nxn-matrices where n is the ``problem size''.\n"
"In order to caluclate the result, the OpenCL device needs to provide three\n"
"times the memory necessary to hold one matrix (A, B and a result = 3 Matrices)"
);
printf(
"To be able to compute a comparison-result on CPU we not simply calculate\n"
"A x B, but instead (A x B)^%d. This way we can utilize\n"
"exponentiation by squaring on the CPU. In order to run longer on the GPU\n"
"(for more reliable measures), we do _not_ use exponentiation by squaring on\n"
"the OpenCL device.\n", MULTIPLY_REPEAT
);
}
#define OCCALL(I, E) if((ret = (I)) != CL_SUCCESS) { \
ma_opencl_error(ret, __LINE__); E; }
/*
 * Top-level driver: enumerate OpenCL platforms, print their info, run the
 * matrix-multiplication test on every device, then free everything.
 * Returns a process exit code.
 */
static int run(unsigned long problem_size, char skip_cpu, char require_gpu)
{
cl_int ret;
struct platform_info p;
p.problem_size = problem_size;
p.skip_cpu = skip_cpu;
p.require_gpu = require_gpu;
/* First call just counts platforms so we can size the arrays */
OCCALL(clGetPlatformIDs(0, NULL, &p.num_platforms),
return EXIT_FAILURE);
p.platforms = malloc(p.num_platforms * sizeof(cl_platform_id));
OCCALL(clGetPlatformIDs(p.num_platforms, p.platforms, NULL),
free(p.platforms); return EXIT_FAILURE);
/* NOTE(review): malloc results are not checked here */
p.num_devices = malloc(p.num_platforms * sizeof(cl_uint));
p.devices = malloc(p.num_platforms * sizeof(cl_device_id*));
puts("System information");
for_each_platform(&p, get_and_print_platform_info);
puts("\nTests");
initialize_tests(&p);
for_each_platform(&p, calculate_on_platform);
free_tests(&p);
free(p.devices);
free(p.num_devices);
free(p.platforms);
return EXIT_SUCCESS;
}
/* Report a failed OpenCL call: source line, numeric code and symbolic name. */
static void ma_opencl_error(cl_int id, int line)
{
	const char* symbolic = get_error_string(id);
	printf("Failed to run OpenCL statement, line %d, error code %d / %s.\n",
		line, id, symbolic);
}
/* -> http://stackoverflow.com/questions/24326432/convenient-way-to-show-opencl-
error-codes */
/*
 * Map an OpenCL status code to its symbolic constant name.
 * Returns a pointer to a static string; never NULL.
 */
static char* get_error_string(cl_int code)
{
switch(code) {
case 0: return "CL_SUCCESS";
case -1: return "CL_DEVICE_NOT_FOUND";
case -2: return "CL_DEVICE_NOT_AVAILABLE";
case -3: return "CL_COMPILER_NOT_AVAILABLE";
case -4: return "CL_MEM_OBJECT_ALLOCATION_FAILURE";
case -5: return "CL_OUT_OF_RESOURCES";
case -6: return "CL_OUT_OF_HOST_MEMORY";
case -7: return "CL_PROFILING_INFO_NOT_AVAILABLE";
case -8: return "CL_MEM_COPY_OVERLAP";
case -9: return "CL_IMAGE_FORMAT_MISMATCH";
case -10: return "CL_IMAGE_FORMAT_NOT_SUPPORTED";
case -11: return "CL_BUILD_PROGRAM_FAILURE";
case -12: return "CL_MAP_FAILURE";
case -13: return "CL_MISALIGNED_SUB_BUFFER_OFFSET";
case -14: return "CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST";
case -15: return "CL_COMPILE_PROGRAM_FAILURE";
case -16: return "CL_LINKER_NOT_AVAILABLE";
case -17: return "CL_LINK_PROGRAM_FAILURE";
case -18: return "CL_DEVICE_PARTITION_FAILED";
case -19: return "CL_KERNEL_ARG_INFO_NOT_AVAILABLE";
case -30: return "CL_INVALID_VALUE";
case -31: return "CL_INVALID_DEVICE_TYPE";
case -32: return "CL_INVALID_PLATFORM";
case -33: return "CL_INVALID_DEVICE";
case -34: return "CL_INVALID_CONTEXT";
case -35: return "CL_INVALID_QUEUE_PROPERTIES";
case -36: return "CL_INVALID_COMMAND_QUEUE";
case -37: return "CL_INVALID_HOST_PTR";
case -38: return "CL_INVALID_MEM_OBJECT";
case -39: return "CL_INVALID_IMAGE_FORMAT_DESCRIPTOR";
case -40: return "CL_INVALID_IMAGE_SIZE";
case -41: return "CL_INVALID_SAMPLER";
case -42: return "CL_INVALID_BINARY";
case -43: return "CL_INVALID_BUILD_OPTIONS";
case -44: return "CL_INVALID_PROGRAM";
case -45: return "CL_INVALID_PROGRAM_EXECUTABLE";
case -46: return "CL_INVALID_KERNEL_NAME";
case -47: return "CL_INVALID_KERNEL_DEFINITION";
case -48: return "CL_INVALID_KERNEL";
case -49: return "CL_INVALID_ARG_INDEX";
case -50: return "CL_INVALID_ARG_VALUE";
case -51: return "CL_INVALID_ARG_SIZE";
case -52: return "CL_INVALID_KERNEL_ARGS";
case -53: return "CL_INVALID_WORK_DIMENSION";
case -54: return "CL_INVALID_WORK_GROUP_SIZE";
case -55: return "CL_INVALID_WORK_ITEM_SIZE";
case -56: return "CL_INVALID_GLOBAL_OFFSET";
case -57: return "CL_INVALID_EVENT_WAIT_LIST";
case -58: return "CL_INVALID_EVENT";
case -59: return "CL_INVALID_OPERATION";
case -60: return "CL_INVALID_GL_OBJECT";
case -61: return "CL_INVALID_BUFFER_SIZE";
case -62: return "CL_INVALID_MIP_LEVEL";
case -63: return "CL_INVALID_GLOBAL_WORK_SIZE";
case -64: return "CL_INVALID_PROPERTY";
case -65: return "CL_INVALID_IMAGE_DESCRIPTOR";
case -66: return "CL_INVALID_COMPILER_OPTIONS";
case -67: return "CL_INVALID_LINKER_OPTIONS";
case -68: return "CL_INVALID_DEVICE_PARTITION_COUNT";
/* vendor-extension codes below */
case -1000: return "CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR";
case -1001: return "CL_PLATFORM_NOT_FOUND_KHR";
case -1002: return "CL_INVALID_D3D10_DEVICE_KHR";
case -1003: return "CL_INVALID_D3D10_RESOURCE_KHR";
case -1004: return "CL_D3D10_RESOURCE_ALREADY_ACQUIRED_KHR";
case -1005: return "CL_D3D10_RESOURCE_NOT_ACQUIRED_KHR";
default: return "Unknown OpenCL error";
}
}
static void for_each_platform(struct platform_info* p,
void (*func)(struct platform_info*, int))
{
int i;
for(i = 0; i < p->num_platforms; i++)
func(p, i);
}
/*
 * Enumerate the devices of platform `pid` (GPUs only when require_gpu is
 * set), store them in p->devices[pid], and print their properties.
 * On enumeration failure the platform is recorded as having 0 devices.
 */
static void get_and_print_platform_info(struct platform_info* p, int pid)
{
cl_int ret;
int j;
long unsigned device_types = p->require_gpu? CL_DEVICE_TYPE_GPU:
CL_DEVICE_TYPE_ALL;
cl_platform_id cid = p->platforms[pid];
printf("Platform %d: ", pid);
/* First call counts devices; on error pretend the platform is empty */
OCCALL(clGetDeviceIDs(cid, device_types, 0, NULL, p->num_devices + pid),
p->num_devices[pid] = 0; return);
printf("%d devices.\n", p->num_devices[pid]);
if(p->num_devices[pid] == 0)
return;
/* freed later by calculate_on_platform() */
p->devices[pid] = malloc(p->num_devices[pid] * sizeof(cl_device_id));
OCCALL(clGetDeviceIDs(cid, device_types, p->num_devices[pid],
p->devices[pid], NULL), exit(64));
for(j = 0; j < p->num_devices[pid]; j++)
get_and_print_device_info(j, p->devices[pid][j]);
}
/* Print the interesting properties of one device (index `j` is for display). */
static void get_and_print_device_info(int j, cl_device_id d)
{
char info[INFOBUFSIZ];
printf(" Device %d:\n", j);
print_info_str(d, info, CL_DEVICE_VENDOR, "Vendor:");
print_info_str(d, info, CL_DEVICE_NAME, "Name:");
print_info_int(d, CL_DEVICE_MAX_COMPUTE_UNITS, "Compute Units:");
print_info_siz(d, CL_DEVICE_GLOBAL_MEM_SIZE, "Global Memory:");
print_info_siz(d, CL_DEVICE_LOCAL_MEM_SIZE, "Local Memory:");
}
/*
 * Query a string device property into caller-supplied `buf`
 * (assumed >= INFOBUFSIZ bytes) and print it under the given label.
 */
static void print_info_str(cl_device_id d, char* buf, int property,
char* property_display)
{
cl_int ret;
printf(" %-14s ", property_display);
OCCALL(clGetDeviceInfo(d, property, INFOBUFSIZ, buf, NULL), return);
puts(buf);
}
/*
 * Query a cl_ulong byte-size device property and print it with a
 * human-readable unit (B / KiB / MiB).
 */
static void print_info_siz(cl_device_id d, int property, char* property_display)
{
	cl_int ret;
	unsigned long bytes;

	printf(" %-14s ", property_display);
	OCCALL(clGetDeviceInfo(d, property, sizeof(cl_ulong), &bytes, NULL),
		return);
	if(bytes > 1048576)
		printf("%lu MiB\n", bytes / 1024 / 1024);
	else if(bytes > 1024)
		printf("%lu KiB\n", bytes / 1024);
	else
		printf("%lu B\n", bytes);
}
/* Query a cl_uint device property and print it under the given label. */
static void print_info_int(cl_device_id d, int property, char* property_display)
{
	cl_int ret;
	cl_uint value;

	printf(" %-14s ", property_display);
	OCCALL(clGetDeviceInfo(d, property, sizeof(cl_uint), &value, NULL),
		return);
	printf("%u\n", value);
}
/*
 * Allocate and fill the random input matrices, and (unless skip_cpu)
 * precompute the reference result (A x B)^MULTIPLY_REPEAT on the CPU
 * using exponentiation by squaring.  Prints timing for each phase.
 */
static void initialize_tests(struct platform_info* p)
{
long unsigned asz;
long unsigned i;
int crep;
float* r2;
float* ca;
float* cb;
time_t t[4];
/* asz = element count of one problem_size x problem_size matrix */
asz = p->problem_size * p->problem_size;
printf("Initializing Tests (asz=%lu MiB) ... ", asz * sizeof(float) /
1024 / 1024);
fflush(stdout);
time(t);
p->example_a = malloc(asz * sizeof(float));
p->example_b = malloc(asz * sizeof(float));
p->result = p->skip_cpu? NULL: malloc(asz * sizeof(float));
time(t + 1);
for(i = 0; i < asz; i++) {
p->example_a[i] = my_randf();
p->example_b[i] = my_randf();
}
time(t + 2);
if(!p->skip_cpu) {
/* TODO STRANGE BUG / IT SEEMS THIS IS NOT REALLY FASTER THAN THE OMP VARIANT ON THE VERY SAME DEVICE (CPU) ... SOMEHOW MESSED UP EFFICIENT MATRIX MULTIPLICATION? */
/* Squaring loop: each pass multiplies the current result by itself
 * (first pass: A x B), halving crep — i.e. exponentiation by squaring.
 * r2 is a scratch matrix that is ping-ponged with p->result. */
r2 = malloc(asz * sizeof(float));
ca = p->example_a;
cb = p->example_b;
for(crep = MULTIPLY_REPEAT; crep > 0; crep >>= 1) {
#pragma omp parallel for
for(i = 0; i < p->problem_size; i++)
matmul_sub(p, i, ca, cb);
ca = p->result;
cb = p->result;
p->result = r2;
r2 = ca;
}
/* After the last swap, r2 holds the final product */
free(p->result);
p->result = r2;
}
time(t + 3);
printf("talloc=");
delta_t(t[0], t[1]);
printf(" trnd=");
delta_t(t[1], t[2]);
printf(" tcalc=");
delta_t(t[2], t[3]);
printf(" tS=");
delta_t(t[0], t[3]);
putchar('\n');
}
/*
 * Compute row `i` of the matrix product ca x cb into p->result.
 * All matrices are square (p->problem_size) in row-major, flat layout.
 */
static void matmul_sub(struct platform_info* p, long unsigned i,
			float* ca, float* cb)
{
	long unsigned n = p->problem_size;
	long unsigned col;
	long unsigned k;
	float acc;

	for(col = 0; col < n; col++) {
		acc = 0.0f;
		for(k = 0; k < n; k++)
			acc += ca[i * n + k] * cb[k * n + col];
		p->result[i * n + col] = acc;
	}
}
/*
 * Random float in (-RAND_UPPER_BOUND, RAND_UPPER_BOUND): first rand()
 * call picks the magnitude, second picks the sign (same call order as
 * required for reproducible sequences).
 */
static float my_randf()
{
	float magnitude = (float)((double)rand()/(double)(RAND_MAX/RAND_UPPER_BOUND));
	int negative = rand() < (RAND_MAX/2);
	return negative? -magnitude: magnitude;
}
/* Print the elapsed seconds between two timestamps, one decimal place. */
static void delta_t(time_t t0, time_t t1)
{
	double seconds = difftime(t1, t0);
	printf("%0.1f", seconds);
}
/*
 * Run the benchmark on every device of platform `pid`, then free the
 * device list that get_and_print_platform_info() allocated.
 */
static void calculate_on_platform(struct platform_info* p, int pid)
{
int i;
if(p->num_devices[pid] == 0)
return;
printf("Platform %d\n", pid);
for(i = 0; i < p->num_devices[pid]; i++)
calculate_on_device(p, i, p->devices[pid][i]);
free(p->devices[pid]);
}
#define OCCALLB(I, M, E) if(!(I)) { printf(" %s: ", M); \
ma_opencl_error(ret, __LINE__); E; }
/*
 * Run the matrix-multiplication benchmark on one device: build the kernel,
 * upload A and B, launch the matmul kernel MULTIPLY_REPEAT times (rotating
 * the three buffers so each launch multiplies the previous result), read
 * the final product back, optionally compare it against the CPU reference,
 * and print per-phase timings.  Errors unwind through the goto cleanup
 * chain at the bottom so everything acquired so far is released.
 */
static void calculate_on_device(struct platform_info* p, int did,
cl_device_id d)
{
cl_int ret = CL_SUCCESS;
cl_context context;
cl_command_queue queue;
cl_program prg;
cl_kernel kernel;
cl_event event;
cl_mem a;
cl_mem b;
cl_mem result;
/* Rotating views onto a/b/result: ca,cb = inputs, cr = output of a pass */
cl_mem* ca;
cl_mem* cb;
cl_mem* cr;
cl_mem* bak = NULL;
float* result_ocl;
int crep;
size_t wdsz[2];
unsigned long n = p->problem_size * p->problem_size;
unsigned long memsz = n * sizeof(float);
time_t t[6];
printf(" Device %d\n", did);
result_ocl = malloc(memsz);
/* 2D NDRange: one work item per output element */
wdsz[0] = p->problem_size;
wdsz[1] = p->problem_size;
time(t);
OCCALLB(context = clCreateContext(NULL, 1, &d, NULL, NULL, &ret),
"Compute Context could not be created", goto r_r);
OCCALLB(queue = clCreateCommandQueue(context, d, 0, &ret),
"Command Queue could not be created", goto r_ctx);
OCCALLB(prg = clCreateProgramWithSource(context, 1,
&MATRIX_MULTIPLICATION_KERENEL_SRC, NULL, &ret),
"Program could not be created", goto r_q);
OCCALL(clBuildProgram(prg, 0, NULL, NULL, NULL, NULL),
report_program_build_failure(prg, d); goto r_prg);
OCCALLB(kernel = clCreateKernel(prg, "matmul", &ret),
"Failed to create kernel", goto r_prg);
time(t + 1);
/* Inputs are uploaded at buffer-creation time via COPY_HOST_PTR */
OCCALLB(a = clCreateBuffer(context,
CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
memsz, p->example_a, &ret),
"Failed to allocate buffer `a`", goto r_prg);
OCCALLB(b = clCreateBuffer(context,
CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
memsz, p->example_b, &ret),
"Failed to allocate buffer `b`", goto r_ba);
OCCALLB(result = clCreateBuffer(context, CL_MEM_READ_WRITE, memsz, NULL,
&ret),
"Failed to allocate result buffer", goto r_bb);
/* CALCULATION */
time(t + 2);
ca = &a;
cb = &b;
cr = &result;
OCCALL(clSetKernelArg(kernel, 0, sizeof(unsigned long),
&p->problem_size), goto r_br);
/* Buffer rotation: after the first pass cb is pinned to the previous
 * result while ca/cr/bak cycle, so each launch computes
 * (previous result) x (result before that) without extra copies.
 * The comments on each line trace which matrix each pointer holds. */
for(crep = MULTIPLY_REPEAT; crep > 0; crep--) {
if(crep == MULTIPLY_REPEAT - 1) {
/* cr:R, ca:A, cb:B, bak:X -> cr:A, ca:R, cb:R, bak:B */
bak = cb; /* bak:X -> bak:B */
cb = cr; /* cb:B -> cb:R */
cr = ca; /* cr:R -> cr:A */
ca = cb; /* ca:A -> ca:R */
} else if(crep != MULTIPLY_REPEAT) {
/* cr:A, ca:R, cb:R, bak:B -> cr:B, ca:A, cb:R, bak:A */
/* cr:B, ca:A, cb:R, bak:A -> cr:A, ca:B, cb:R, bak:B */
/* cr:A, ca:B, cb:R, bak:B -> cr:B, ca:A, cb:R, bak:A */
ca = cr; /* ca:R -> ca:A */
cr = bak; /* cr:A -> cr:B */
bak = ca; /* bak:B -> bak:A */
}
OCCALL(clSetKernelArg(kernel, 1, sizeof(cl_mem), ca),
goto r_br);
OCCALL(clSetKernelArg(kernel, 2, sizeof(cl_mem), cb),
goto r_br);
OCCALL(clSetKernelArg(kernel, 3, sizeof(cl_mem), cr),
goto r_br);
/* INVOC */
OCCALL(clEnqueueNDRangeKernel(queue, kernel, 2, NULL, wdsz,
NULL, 0, NULL, &event), goto r_br);
await_event(&event);
}
time(t + 3);
/* cr points at the buffer holding the final product */
OCCALL(clEnqueueReadBuffer(queue, *cr, CL_TRUE, 0, memsz, result_ocl, 0,
NULL, &event), goto r_br);
await_event(&event);
time(t + 4);
if(!p->skip_cpu)
compare_check_equality(n, p->result, result_ocl);
time(t + 5);
printf(" tinit=");
delta_t(t[0], t[1]);
printf(", tmem=");
delta_t(t[1], t[2]);
printf(", tcalc=");
delta_t(t[2], t[3]);
printf(", tmem2=");
delta_t(t[3], t[4]);
printf(", tcmp=");
delta_t(t[4], t[5]);
printf(", tS/OCL=");
delta_t(t[0], t[4]);
printf(", tS=");
delta_t(t[0], t[5]);
putchar('\n');
/* Reverse-order cleanup; entered at the right depth via goto on error */
r_br: OCCALL(clReleaseMemObject(result), ;);
r_bb: OCCALL(clReleaseMemObject(b), ;);
r_ba: OCCALL(clReleaseMemObject(a), ;);
r_prg: OCCALL(clReleaseProgram(prg), ;);
r_q: OCCALL(clReleaseCommandQueue(queue), ;);
r_ctx: OCCALL(clReleaseContext(context), ;);
r_r: free(result_ocl);
}
/* Fetch and print the device's build log after clBuildProgram failed. */
static void report_program_build_failure(cl_program prg, cl_device_id d)
{
	cl_int ret;
	char log[4096];

	printf("Failed to compile program: ");
	OCCALL(clGetProgramBuildInfo(prg, d, CL_PROGRAM_BUILD_LOG, sizeof(log),
		log, NULL), return);
	printf("%s\n", log);
}
/* Block until `event` completes, then release it (errors only printed). */
static void await_event(cl_event* event)
{
cl_int ret;
OCCALL(clWaitForEvents(1, event), ;);
OCCALL(clReleaseEvent(*event), ;);
}
/*
 * Element-wise comparison of two float arrays of length n; prints an entry
 * for every pair differing by more than EPSILON.
 *
 * BUG FIX: the original used the integer abs() on a float difference, which
 * truncates toward zero — any |delta| < 1.0 was reported as 0 and never
 * exceeded EPSILON, silently hiding mismatches.  Compute the float absolute
 * value directly (no <math.h> needed, keeps C89 compatibility).
 */
static void compare_check_equality(unsigned long n, float* expected, float* got)
{
	unsigned long i;
	float delta;
	printf(" Comparing results... ");
	for(i = 0; i < n; i++) {
		delta = expected[i] - got[i];
		if(delta < 0.0f)
			delta = -delta;
		if(delta > EPSILON)
			printf("Mismatch@%lu: delta=%f ", i,
				expected[i] - got[i]);
	}
	printf("finished\n");
}
/*
 * Release the matrices allocated by initialize_tests().  p->result is
 * only allocated when the CPU reference run was enabled.
 */
static void free_tests(struct platform_info* p)
{
	if(!p->skip_cpu)
		free(p->result);
	free(p->example_b);
	free(p->example_a);
}
|
BenchUtils.h | /*
* Copyright (c) Meta Platforms, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <chrono>
#include <functional>
#include <vector>
#include <immintrin.h>
#ifdef USE_BLAS
#if __APPLE__
// not sure whether need to differentiate TARGET_OS_MAC or TARGET_OS_IPHONE,
// etc.
#include <Accelerate/Accelerate.h>
#else
#include <cblas.h>
#endif
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MKL
#include <mkl.h>
#endif
#include "./AlignedVec.h"
#include "fbgemm/FbgemmBuild.h"
#include "fbgemm/FbgemmPackMatrixB.h"
#include "src/RefImplementations.h"
namespace fbgemm {
template <typename T>
void randFill(aligned_vector<T>& vec, T low, T high);
void llc_flush(std::vector<char>& llc);
// Same as omp_get_max_threads() when OpenMP is available, otherwise 1
int fbgemm_get_max_threads();
// Same as omp_get_num_threads() when OpenMP is available, otherwise 1
int fbgemm_get_num_threads();
// Same as omp_get_thread_num() when OpenMP is available, otherwise 0
int fbgemm_get_thread_num();
/*
 * Evict the container's backing storage from CPU caches by touching and
 * clflush-ing one byte per cache line.  Returns a value derived from the
 * data so the loads cannot be optimized away (caller may ignore it).
 * T must be a contiguous container (provides size(), data(), value_type).
 */
template <typename T>
NOINLINE float cache_evict(const T& vec) {
auto const size = vec.size();
auto const elemSize = sizeof(typename T::value_type);
auto const dataSize = size * elemSize;
const char* data = reinterpret_cast<const char*>(vec.data());
constexpr int CACHE_LINE_SIZE = 64;
// Not having this dummy computation significantly slows down the computation
// that follows.
float dummy = 0.0f;
for (std::size_t i = 0; i < dataSize; i += CACHE_LINE_SIZE) {
dummy += data[i] * 1.0f;
_mm_mfence();
#ifndef _MSC_VER
/* compiler barrier: keep the load ordered before the flush */
asm volatile("" ::: "memory");
#endif
_mm_clflush(&data[i]);
}
return dummy;
}
/**
* Parse application command line arguments
*
*/
int parseArgumentInt(
int argc,
const char* argv[],
const char* arg,
int non_exist_val,
int def_val);
bool parseArgumentBool(
int argc,
const char* argv[],
const char* arg,
bool def_val);
namespace {
/* No-op eviction functor; default for measureWithWarmup's `fe` parameter. */
struct empty_flush {
void operator()() const {}
};
} // namespace
/**
 * @param Fn functor to execute
 * @param Fe data eviction functor
 */
/*
 * Time `fn` over `measuredIterations` runs after `warmupIterations`
 * untimed runs, calling `fe` before each run to evict data.  With
 * useOpenMP the iterations execute inside a parallel region, with
 * barriers so every thread times the same work; only thread 0's
 * measurements are accumulated.  Returns mean seconds per iteration.
 */
template <class Fn, class Fe = std::function<void()>>
double measureWithWarmup(
Fn&& fn,
int warmupIterations,
int measuredIterations,
const Fe& fe = empty_flush(),
bool useOpenMP = false) {
for (int i = 0; i < warmupIterations; ++i) {
// Evict data first
fe();
fn();
}
/* total measured time in nanoseconds (thread 0 only) */
double ttot = 0.0;
#ifdef _OPENMP
#pragma omp parallel if (useOpenMP)
{
#endif
for (int i = 0; i < measuredIterations; ++i) {
int thread_id = 0;
std::chrono::time_point<std::chrono::high_resolution_clock> start, end;
#ifdef _OPENMP
if (useOpenMP) {
thread_id = omp_get_thread_num();
}
#endif
/* only one thread evicts; the barrier below keeps others waiting */
if (thread_id == 0) {
fe();
}
#ifdef _OPENMP
if (useOpenMP) {
#pragma omp barrier
}
#endif
start = std::chrono::high_resolution_clock::now();
fn();
#ifdef _OPENMP
if (useOpenMP) {
#pragma omp barrier
}
#endif
end = std::chrono::high_resolution_clock::now();
auto dur =
std::chrono::duration_cast<std::chrono::nanoseconds>(end - start);
if (thread_id == 0) {
// TODO: measure load imbalance
ttot += dur.count();
}
}
#ifdef _OPENMP
}
#endif
return ttot / 1e9 / measuredIterations;
}
/*
 * @brief Out-of-place transposition: writes the N x M transpose of the
 *        M x N row-major matrix `src` into `dst`.
 * @param M number of rows in src (columns in dst)
 * @param N number of columns in src (rows in dst)
 * @param ld_src leading dimension (row stride) of src
 * @param ld_dst leading dimension (row stride) of dst
 */
template <typename T>
void transpose_matrix(
    int M,
    int N,
    const T* src,
    int ld_src,
    T* dst,
    int ld_dst) {
  for (int row = 0; row < N; ++row) {
    for (int col = 0; col < M; ++col) {
      dst[row * ld_dst + col] = src[col * ld_src + row];
    }
  }
}
/*
 * @brief In-place transposition of an n x k row-major matrix `ref`
 *        (result is k x n, stored in the same buffer).
 * @param n number of rows in input (number of columns in output)
 * @param k number of columns in input (number of rows in output)
 */
template <typename T>
void transpose_matrix(T* ref, int n, int k) {
  // Snapshot the input, then scatter it back transposed.
  std::vector<T> scratch(ref, ref + static_cast<size_t>(n) * k);
  for (int row = 0; row < n; ++row) {
    for (int col = 0; col < k; ++col) {
      ref[col * n + row] = scratch[row * k + col];
    }
  }
}
#if defined(USE_MKL)
void test_xerbla(char* srname, const int* info, int);
#endif
#define dataset 1
// Benchmark harness: times a baseline sgemm (MKL / BLAS / reference,
// depending on build flags) against fbgemm's packed cblas_gemm_compute
// over a set of (m, n, k) shapes, cross-checking results during warm-up.
//
// num_instances  > 1 runs one independent problem instance per thread
//                (functional decomposition); == 1 shares one instance
//                across all threads (data decomposition).
// flush          if true, evict matrices from cache between timed runs.
// repetitions    inner repetitions per timed measurement.
// is_mkl         when built with USE_MKL, gates the MKL baseline run.
template <typename btype>
void performance_test(
    int num_instances,
    bool flush,
    int repetitions,
    bool is_mkl) {
#if defined(USE_MKL)
  mkl_set_xerbla((XerblaEntry)test_xerbla);
#endif
  float alpha = 1.f, beta = 1.f;
  matrix_op_t btran = matrix_op_t::Transpose;
#if dataset == 1
  const int NITER = (flush) ? 10 : 100;
  std::vector<std::vector<int>> shapes;
  for (auto m = 1; m < 120; m++) {
    // shapes.push_back({m, 128, 512});
    shapes.push_back({m, 512, 512});
  }
#elif dataset == 2
  const int NITER = (flush) ? 10 : 100;
#include "shapes_dataset.h"
#else
  // randomized shapes; flushing is disabled for this sweep
  flush = false;
  constexpr int NITER = 1;
  std::vector<std::vector<int>> shapes;
  std::random_device r;
  std::default_random_engine generator(r());
  std::uniform_int_distribution<int> dm(1, 100);
  std::uniform_int_distribution<int> dnk(1, 1024);
  for (int i = 0; i < 1000; i++) {
    int m = dm(generator);
    int n = dnk(generator);
    int k = dnk(generator);
    shapes.push_back({m, n, k});
  }
#endif
  std::string type;
  double gflops, gbs, ttot;
  for (auto s : shapes) {
    int m = s[0];
    int n = s[1];
    int k = s[2];
    // initialize with small numbers
    aligned_vector<int> Aint(m * k);
    randFill(Aint, 0, 4);
    std::vector<aligned_vector<float>> A;
    for (int i = 0; i < num_instances; ++i) {
      A.push_back(aligned_vector<float>(Aint.begin(), Aint.end()));
    }
    aligned_vector<int> Bint(k * n);
    randFill(Bint, 0, 4);
    aligned_vector<float> B(Bint.begin(), Bint.end());
    // One packed B per instance for the fbgemm path.
    std::vector<std::unique_ptr<PackedGemmMatrixB<btype>>> Bp;
    for (int i = 0; i < num_instances; ++i) {
      Bp.emplace_back(std::unique_ptr<PackedGemmMatrixB<btype>>(
          new PackedGemmMatrixB<btype>(btran, k, n, alpha, B.data())));
    }
    // Round leading dimensions up to a 64-byte (cache-line) boundary.
    // NOTE(review): "+ 64" (rather than "+ 63") adds a full extra cache
    // line even when the size is already aligned -- presumably harmless
    // padding, but confirm it is intentional.
    auto kAligned = ((k * sizeof(float) + 64) & ~63) / sizeof(float);
    auto nAligned = ((n * sizeof(float) + 64) & ~63) / sizeof(float);
    // Pre-transpose (and alpha-scale) B once per instance so the
    // baseline gemm below can run with CblasNoTrans.
    std::vector<aligned_vector<float>> Bt(num_instances);
    auto& Bt_ref = Bt[0];
    if (btran == matrix_op_t::Transpose) {
      Bt_ref.resize(k * nAligned);
      for (auto row = 0; row < k; ++row) {
        for (auto col = 0; col < n; ++col) {
          Bt_ref[row * nAligned + col] = alpha * B[col * k + row];
        }
      }
    } else {
      Bt_ref.resize(kAligned * n);
      for (auto row = 0; row < k; ++row) {
        for (auto col = 0; col < n; ++col) {
          Bt_ref[col * kAligned + row] = alpha * B[col * k + row];
        }
      }
    }
    for (auto i = 1; i < num_instances; ++i) {
      Bt[i] = Bt_ref;
    }
    std::vector<aligned_vector<float>> C_ref;
    std::vector<aligned_vector<float>> C_fb;
    if (beta != 0.0f) {
      // beta != 0 reads C, so both paths must start from identical C.
      aligned_vector<int> Cint(m * n);
      randFill(Cint, 0, 4);
      for (int i = 0; i < num_instances; ++i) {
        C_ref.push_back(aligned_vector<float>(Cint.begin(), Cint.end()));
        C_fb.push_back(aligned_vector<float>(Cint.begin(), Cint.end()));
      }
    } else {
      for (int i = 0; i < num_instances; ++i) {
        C_ref.push_back(aligned_vector<float>(m * n, 1.f));
        C_fb.push_back(aligned_vector<float>(m * n, NAN));
      }
    }
    double nflops = 2.0 * m * n * k;
    double nbytes = 4.0 * m * k + sizeof(btype) * 1.0 * k * n + 4.0 * m * n;
    // warm up MKL and fbgemm
    // check correctness at the same time
    for (auto w = 0; w < 3; w++) {
#if defined(USE_MKL) || defined(USE_BLAS)
      cblas_sgemm(
          CblasRowMajor,
          CblasNoTrans,
          CblasNoTrans, // B is pretransposed, if required by operation
          m,
          n,
          k,
          1.0, // Multiplication by alpha is done during transpose of B
          A[0].data(),
          k,
          Bt[0].data(),
          btran == matrix_op_t::NoTranspose ? kAligned : nAligned,
          beta,
          C_ref[0].data(),
          n);
#else
      cblas_sgemm_ref(
          matrix_op_t::NoTranspose,
          matrix_op_t::NoTranspose,
          m,
          n,
          k,
          1.0,
          A[0].data(),
          k,
          Bt[0].data(),
          (btran == matrix_op_t::NoTranspose) ? kAligned : nAligned,
          beta,
          C_ref[0].data(),
          n);
#endif
#ifdef _OPENMP
#pragma omp parallel if (num_instances == 1)
#endif
      {
        int num_threads = num_instances == 1 ? fbgemm_get_num_threads() : 1;
        int tid = num_instances == 1 ? fbgemm_get_thread_num() : 0;
        cblas_gemm_compute(
            matrix_op_t::NoTranspose,
            m,
            A[0].data(),
            *Bp[0],
            beta,
            C_fb[0].data(),
            tid,
            num_threads);
      }
#if defined(USE_MKL) || defined(USE_BLAS)
      // Compare results
      for (auto i = 0; i < C_ref[0].size(); i++) {
        if (std::abs(C_ref[0][i] - C_fb[0][i]) > 1e-3) {
          fprintf(
              stderr,
              "Error: too high diff between fp32 ref %f and fp16 %f at %d\n",
              C_ref[0][i],
              C_fb[0][i],
              i);
          return;
        }
      }
#endif
    }
#if defined(USE_MKL)
    // NOTE: this "if (is_mkl) {" is closed by the matching "}" inside
    // the "#ifdef USE_MKL" block after the printf below, so the whole
    // baseline measurement is skipped when !is_mkl under USE_MKL.
    if (is_mkl) {
      // Gold via MKL sgemm
      type = "MKL_FP32";
#elif defined(USE_BLAS)
    type = "BLAS_FP32";
#else
    type = "REF_FP32";
#endif
    ttot = measureWithWarmup(
        [&]() {
          int copy = num_instances == 1 ? 0 : fbgemm_get_thread_num();
          for (int i = 0; i < repetitions; ++i) {
#if defined(USE_MKL) || defined(USE_BLAS)
            cblas_sgemm(
                CblasRowMajor,
                CblasNoTrans,
                CblasNoTrans,
                m,
                n,
                k,
                1.0,
                A[copy].data(),
                k,
                Bt[copy].data(),
                btran == matrix_op_t::NoTranspose ? kAligned : nAligned,
                beta,
                C_ref[copy].data(),
                n);
#else
            cblas_sgemm_ref(
                matrix_op_t::NoTranspose,
                matrix_op_t::NoTranspose,
                m,
                n,
                k,
                1.0,
                A[copy].data(),
                k,
                Bt[copy].data(),
                (btran == matrix_op_t::NoTranspose) ? kAligned : nAligned,
                beta,
                C_ref[copy].data(),
                n);
#endif
          }
        },
        3,
        NITER,
        [&]() {
          if (flush) {
            int copy = num_instances == 1 ? 0 : fbgemm_get_thread_num();
            cache_evict(A[copy]);
            cache_evict(Bt[copy]);
            cache_evict(C_ref[copy]);
          }
        },
        // Use OpenMP if num instances > 1
        num_instances > 1);
    gflops = nflops / ttot / 1e9;
    gbs = nbytes / ttot / 1e9;
    printf(
        "\n%30s m = %5d n = %5d k = %5d Gflops = %8.4lf GBytes = %8.4lf\n",
        type.c_str(),
        m,
        n,
        k,
        gflops * repetitions,
        gbs * repetitions);
#ifdef USE_MKL
    }
#endif
    type = "FBP_" + std::string(typeid(btype).name());
    ttot = measureWithWarmup(
        [&]() {
          // When executing in data decomposition (single-instance) mode
          // different threads will access different regions of the same
          // matrices. Thus, the copy to be used is always 0. The number
          // of threads is the same as the number of threads in the
          // parallel region.
          // When running in functional decomposition (multi-instance)
          // mode different matrices are used. The copy to be used is
          // selected by thread_id (thread_num), and the number of
          // threads performing the compute of the same instance is 1.
          int copy = num_instances == 1 ? 0 : fbgemm_get_thread_num();
          int num_threads = num_instances == 1 ? fbgemm_get_num_threads() : 1;
          int tid = num_instances == 1 ? fbgemm_get_thread_num() : 0;
          for (int i = 0; i < repetitions; ++i) {
            cblas_gemm_compute(
                matrix_op_t::NoTranspose,
                m,
                A[copy].data(),
                *Bp[copy],
                beta,
                C_fb[copy].data(),
                tid,
                num_threads);
          }
        },
        3,
        NITER,
        [&]() {
          if (flush) {
            int copy = num_instances == 1 ? 0 : fbgemm_get_thread_num();
            cache_evict(A[copy]);
            cache_evict(*Bp[copy]);
            cache_evict(C_fb[copy]);
          }
        },
        true /*useOpenMP*/);
    gflops = nflops / ttot / 1e9;
    gbs = nbytes / ttot / 1e9;
    printf(
        "%30s m = %5d n = %5d k = %5d Gflops = %8.4lf GBytes = %8.4lf\n",
        type.c_str(),
        m,
        n,
        k,
        gflops * repetitions,
        gbs * repetitions);
  }
}
aligned_vector<float> getRandomSparseVector(
unsigned size,
float fractionNonZeros = 1.0);
template <typename T>
aligned_vector<T> getRandomBlockSparseMatrix(
int Rows,
int Cols,
float fractionNonZerosBlocks = 1.0,
int RowBlockSize = 4,
int ColBlockSize = 1,
T low = 0,
T high = 9);
} // namespace fbgemm
|
Example_declare_target.3.c | /*
* @@name: declare_target.3c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
#define N 1000
#pragma omp declare target
float p[N], v1[N], v2[N];
#pragma omp end declare target
extern void init(float *, float *, int);
extern void output(float *, int);
/* Element-wise vector multiply executed on the target device.
   p, v1 and v2 live in the device data environment (declare target
   above); target update moves the data explicitly in each direction. */
void vec_mult()
{
   int i;
   init(v1, v2, N);
   /* push freshly initialized inputs to the device */
   #pragma omp target update to(v1, v2)
   #pragma omp target
   #pragma omp parallel for
   for (i=0; i<N; i++)
     p[i] = v1[i] * v2[i];
   /* bring the computed product back to the host */
   #pragma omp target update from(p)
   output(p, N);
}
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <cstring>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <vector>
#include "./base.h"
#include "../../src/common/span.h"
#include "../../src/common/group_data.h"
#include "../../src/common/host_device_vector.h"
namespace xgboost {
// forward declare learner.
class LearnerImpl;
/*! \brief data type accepted by xgboost interface */
enum DataType {
kFloat32 = 1,
kDouble = 2,
kUInt32 = 3,
kUInt64 = 4
};
/*!
 * \brief Meta information about dataset, always sit in memory.
 */
class MetaInfo {
 public:
  /*! \brief number of rows in the data */
  uint64_t num_row_{0};
  /*! \brief number of columns in the data */
  uint64_t num_col_{0};
  /*! \brief number of nonzero entries in the data */
  uint64_t num_nonzero_{0};
  /*! \brief label of each instance */
  HostDeviceVector<bst_float> labels_;
  /*!
   * \brief specified root index of each instance,
   *  can be used for multi task setting
   */
  std::vector<bst_uint> root_index_;
  /*!
   * \brief the index of begin and end of a group
   *  needed when the learning task is ranking.
   */
  std::vector<bst_uint> group_ptr_;
  /*! \brief weights of each instance, optional */
  HostDeviceVector<bst_float> weights_;
  /*! \brief session-id of each instance, optional */
  std::vector<uint64_t> qids_;
  /*!
   * \brief initialized margins,
   * if specified, xgboost will start from this init margin
   * can be used to specify initial prediction to boost from.
   */
  HostDeviceVector<bst_float> base_margin_;
  /*! \brief version flag, used to check version of this info */
  static const int kVersion = 2;
  /*! \brief version that introduced qid field */
  static const int kVersionQidAdded = 2;
  /*! \brief default constructor */
  MetaInfo() = default;
  /*!
   * \brief Get weight of each instances.
   * \param i Instance index (not bounds-checked).
   * \return The weight; defaults to 1.0f when the dataset is unweighted
   *  (weights_ left empty).
   */
  inline bst_float GetWeight(size_t i) const {
    return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
  }
  /*!
   * \brief Get the root index of i-th instance.
   * \param i Instance index.
   * \return The pre-defined root index of i-th instance; 0 when no
   *  root_index_ was supplied.
   */
  inline unsigned GetRoot(size_t i) const {
    return root_index_.size() != 0 ? root_index_[i] : 0U;
  }
  /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
  inline const std::vector<size_t>& LabelAbsSort() const {
    // Lazily computed and memoized. The cache is considered valid while
    // the label COUNT is unchanged; mutating label values without
    // changing the count is not detected.
    if (label_order_cache_.size() == labels_.Size()) {
      return label_order_cache_;
    }
    label_order_cache_.resize(labels_.Size());
    std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
    const auto& l = labels_.HostVector();
    XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
              [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
    return label_order_cache_;
  }
  /*! \brief clear all the information */
  void Clear();
  /*!
   * \brief Load the Meta info from binary stream.
   * \param fi The input stream
   */
  void LoadBinary(dmlc::Stream* fi);
  /*!
   * \brief Save the Meta info to binary stream
   * \param fo The output stream.
   */
  void SaveBinary(dmlc::Stream* fo) const;
  /*!
   * \brief Set information in the meta info.
   * \param key The key of the information.
   * \param dptr The data pointer of the source array.
   * \param dtype The type of the source data.
   * \param num Number of elements in the source array.
   */
  void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);

 private:
  /*! \brief argsort of labels, built on demand by LabelAbsSort() */
  mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
  /*! \brief feature index */
  bst_uint index;
  /*! \brief feature value */
  bst_float fvalue;
  /*! \brief default constructor */
  Entry() = default;
  /*!
   * \brief constructor with index and value
   * \param index The feature or row index.
   * \param fvalue The feature value.
   */
  Entry(bst_uint index, bst_float fvalue) : index(index), fvalue(fvalue) {}
  /*! \brief comparator ordering entries by ascending feature value
   *  (the old "reversely compare" wording did not match the code) */
  inline static bool CmpValue(const Entry& a, const Entry& b) {
    return a.fvalue < b.fvalue;
  }
  /*! \brief equality on both index and value */
  inline bool operator==(const Entry& other) const {
    return (this->index == other.index && this->fvalue == other.fvalue);
  }
};
/*!
 * \brief In-memory storage unit of sparse batch, stored in CSR format.
 */
class SparsePage {
 public:
  // Offset for each row: [offset[i], offset[i+1]) delimits row i in data.
  HostDeviceVector<size_t> offset;
  /*! \brief the data of the segments */
  HostDeviceVector<Entry> data;
  // Row index of the first row stored in this page; added to local row
  // ids in GetTranspose().
  size_t base_rowid;
  /*! \brief an instance of sparse vector in the batch */
  using Inst = common::Span<Entry const>;
  /*! \brief get i-th row from the batch */
  inline Inst operator[](size_t i) const {
    const auto& data_vec = data.HostVector();
    const auto& offset_vec = offset.HostVector();
    size_t size;
    // in distributed mode, some partitions may not get any instance for a feature. Therefore
    // we should set the size as zero
    if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
      size = 0;
    } else {
      size = offset_vec[i + 1] - offset_vec[i];
    }
    return {data_vec.data() + offset_vec[i],
            static_cast<Inst::index_type>(size)};
  }
  /*! \brief constructor */
  SparsePage() {
    this->Clear();
  }
  /*! \return number of instance in the page */
  inline size_t Size() const {
    return offset.Size() - 1;
  }
  /*! \return estimation of memory cost of this page */
  inline size_t MemCostBytes() const {
    return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
  }
  /*! \brief clear the page */
  inline void Clear() {
    base_rowid = 0;
    auto& offset_vec = offset.HostVector();
    offset_vec.clear();
    // keep the leading 0 so Size() == offset.Size() - 1 holds even for
    // an empty page
    offset_vec.push_back(0);
    data.HostVector().clear();
  }
  // Transpose this CSR page into a page with num_columns row lists
  // (i.e. CSC of the original), via a parallel two-pass group build.
  SparsePage GetTranspose(int num_columns) const {
    SparsePage transpose;
    common::ParallelGroupBuilder<Entry> builder(&transpose.offset.HostVector(),
                                                &transpose.data.HostVector());
    const int nthread = omp_get_max_threads();
    builder.InitBudget(num_columns, nthread);
    long batch_size = static_cast<long>(this->Size()); // NOLINT(*)
    // pass 1: count entries destined for each output row
#pragma omp parallel for schedule(static)
    for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
      int tid = omp_get_thread_num();
      auto inst = (*this)[i];
      for (bst_uint j = 0; j < inst.size(); ++j) {
        builder.AddBudget(inst[j].index, tid);
      }
    }
    builder.InitStorage();
    // pass 2: scatter the entries into their reserved slots
#pragma omp parallel for schedule(static)
    for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
      int tid = omp_get_thread_num();
      auto inst = (*this)[i];
      for (bst_uint j = 0; j < inst.size(); ++j) {
        builder.Push(
            inst[j].index,
            Entry(static_cast<bst_uint>(this->base_rowid + i), inst[j].fvalue),
            tid);
      }
    }
    return transpose;
  }
  // Sort every row's entries by ascending feature value.
  void SortRows() {
    auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for schedule(dynamic, 1)
    for (bst_omp_uint i = 0; i < ncol; ++i) {
      if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
        std::sort(
            this->data.HostVector().begin() + this->offset.HostVector()[i],
            this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
            Entry::CmpValue);
      }
    }
  }
  /*!
   * \brief Push row block into the page.
   * \param batch the row batch.
   */
  void Push(const dmlc::RowBlock<uint32_t>& batch);
  /*!
   * \brief Push a sparse page
   * \param batch the row page
   */
  void Push(const SparsePage &batch);
  /*!
   * \brief Push a SparsePage stored in CSC format
   * \param batch The row batch to be pushed
   */
  void PushCSC(const SparsePage& batch);
  /*!
   * \brief Push one instance into page
   * \param inst an instance row
   */
  inline void Push(const Inst &inst) {
    auto& data_vec = data.HostVector();
    auto& offset_vec = offset.HostVector();
    offset_vec.push_back(offset_vec.back() + inst.size());
    size_t begin = data_vec.size();
    data_vec.resize(begin + inst.size());
    if (inst.size() != 0) {
      std::memcpy(dmlc::BeginPtr(data_vec) + begin, inst.data(),
                  sizeof(Entry) * inst.size());
    }
  }
  // NOTE(review): non-const overload duplicating Size() above; looks
  // redundant -- confirm whether it can be removed.
  size_t Size() { return offset.Size() - 1; }
};
// Interface behind BatchIterator; concrete subclasses supply the pages.
class BatchIteratorImpl {
 public:
  virtual ~BatchIteratorImpl() {}
  // Polymorphic copy, used by BatchIterator's copy constructor.
  virtual BatchIteratorImpl* Clone() = 0;
  virtual SparsePage& operator*() = 0;
  virtual const SparsePage& operator*() const = 0;
  virtual void operator++() = 0;
  // True once the sequence of pages is exhausted.
  virtual bool AtEnd() const = 0;
};
// Forward iterator over SparsePage batches, wrapping a polymorphic
// BatchIteratorImpl (deep-copied on iterator copy via Clone()).
class BatchIterator {
 public:
  using iterator_category = std::forward_iterator_tag;
  // Takes ownership of impl (may be nullptr for the end sentinel).
  explicit BatchIterator(BatchIteratorImpl* impl) { impl_.reset(impl); }
  BatchIterator(const BatchIterator& other) {
    if (other.impl_) {
      impl_.reset(other.impl_->Clone());
    } else {
      impl_.reset();
    }
  }
  void operator++() {
    CHECK(impl_ != nullptr);
    ++(*impl_);
  }
  SparsePage& operator*() {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }
  const SparsePage& operator*() const {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }
  // NOTE(review): rhs is ignored -- this only supports comparison
  // against the end sentinel (as produced by BatchSet::end()), not
  // general iterator comparison.
  bool operator!=(const BatchIterator& rhs) const {
    CHECK(impl_ != nullptr);
    return !impl_->AtEnd();
  }
  bool AtEnd() const {
    CHECK(impl_ != nullptr);
    return impl_->AtEnd();
  }

 private:
  std::unique_ptr<BatchIteratorImpl> impl_;
};
// Range object enabling range-based for over batches, e.g.
// `for (auto& page : dmat->GetRowBatches())`.
class BatchSet {
 public:
  explicit BatchSet(BatchIterator begin_iter) : begin_iter_(begin_iter) {}
  BatchIterator begin() { return begin_iter_; }
  // End sentinel: an iterator holding no impl.
  BatchIterator end() { return BatchIterator(nullptr); }

 private:
  BatchIterator begin_iter_;
};
/*!
 * \brief This is data structure that user can pass to DMatrix::Create
 *  to create a DMatrix for training, user can create this data structure
 *  for customized Data Loading on single machine.
 *
 *  On distributed setting, usually a customized dmlc::Parser is needed instead.
 */
class DataSource : public dmlc::DataIter<SparsePage> {
 public:
  /*!
   * \brief Meta information about the dataset
   *  The subclass need to be able to load this correctly from data.
   */
  MetaInfo info;
};
/*!
 * \brief A vector-like structure to represent set of rows.
 *  But saves the memory when all rows are in the set (common case in xgb):
 *  in that dense case only a counter is kept and rows_ stays empty.
 */
class RowSet {
 public:
  /*! \return i-th row index */
  inline bst_uint operator[](size_t i) const;
  /*! \return the size of the set. */
  inline size_t Size() const;
  /*! \brief push the index back to the set */
  inline void PushBack(bst_uint i);
  /*! \brief clear the set */
  inline void Clear();
  /*!
   * \brief save rowset to file.
   * \param fo The file to be saved.
   */
  inline void Save(dmlc::Stream* fo) const;
  /*!
   * \brief Load rowset from file.
   * \param fi The file to be loaded.
   * \return if read is successful.
   */
  inline bool Load(dmlc::Stream* fi);
  /*! \brief constructor */
  RowSet() = default;

 private:
  /*! \brief number of rows in the set */
  uint64_t size_{0};
  // Explicit row indices; empty means the set is dense: {0, ..., size_-1}.
  std::vector<bst_uint> rows_;
};
/*!
 * \brief Internal data structured used by XGBoost during training.
 *  There are two ways to create a customized DMatrix that reads in user defined-format.
 *
 *  - Provide a dmlc::Parser and pass into the DMatrix::Create
 *  - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by DMLC_REGISTER_DATA_PARSER;
 *      - This works best for user defined data input source, such as data-base, filesystem.
 *  - Provide a DataSource, that can be passed to DMatrix::Create
 *    This can be used to re-use inmemory data structure into DMatrix.
 */
class DMatrix {
 public:
  /*! \brief default constructor */
  DMatrix() = default;
  /*! \brief meta information of the dataset */
  virtual MetaInfo& Info() = 0;
  /*! \brief meta information of the dataset */
  virtual const MetaInfo& Info() const = 0;
  /**
   * \brief Gets row batches. Use range based for loop over BatchSet to access individual batches.
   */
  virtual BatchSet GetRowBatches() = 0;
  virtual BatchSet GetSortedColumnBatches() = 0;
  virtual BatchSet GetColumnBatches() = 0;
  // the following are column meta data, should be able to answer them fast.
  /*! \return Whether the data columns single column block. */
  virtual bool SingleColBlock() const = 0;
  /*! \brief get column density */
  virtual float GetColDensity(size_t cidx) = 0;
  /*! \brief virtual destructor */
  virtual ~DMatrix() = default;
  /*!
   * \brief Save DMatrix to local file.
   *  The saved file only works for non-sharded dataset(single machine training).
   *  This API is deprecated and discouraged to use.
   * \param fname The file name to be saved.
   */
  virtual void SaveToLocalFile(const std::string& fname);
  /*!
   * \brief Load DMatrix from URI.
   * \param uri The URI of input.
   * \param silent Whether print information during loading.
   * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
   * \param file_format The format type of the file, used for dmlc::Parser::Create.
   *  By default "auto" will be able to load in both local binary file.
   * \param page_size Page size for external memory.
   * \return The created DMatrix.
   */
  static DMatrix* Load(const std::string& uri,
                       bool silent,
                       bool load_row_split,
                       const std::string& file_format = "auto",
                       const size_t page_size = kPageSize);
  /*!
   * \brief create a new DMatrix, by wrapping a row_iterator, and meta info.
   * \param source The source iterator of the data, the create function takes ownership of the source.
   * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
   *  This can be nullptr for common cases, and in-memory mode will be used.
   * \return a Created DMatrix.
   */
  static DMatrix* Create(std::unique_ptr<DataSource>&& source,
                         const std::string& cache_prefix = "");
  /*!
   * \brief Create a DMatrix by loading data from parser.
   *  Parser can later be deleted after the DMatrix is created.
   * \param parser The input data parser
   * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
   *  This can be nullptr for common cases, and in-memory mode will be used.
   * \param page_size Page size for external memory.
   * \sa dmlc::Parser
   * \note dmlc-core provides efficient distributed data parser for libsvm format.
   *  User can create and register customized parser to load their own format using DMLC_REGISTER_DATA_PARSER.
   *  See "dmlc-core/include/dmlc/data.h" for detail.
   * \return A created DMatrix.
   */
  static DMatrix* Create(dmlc::Parser<uint32_t>* parser,
                         const std::string& cache_prefix = "",
                         const size_t page_size = kPageSize);
  /*! \brief page size 32 MB */
  static const size_t kPageSize = 32UL << 20UL;
};
// implementation of inline functions
// Return the i-th row index; a dense set (rows_ empty) is the identity
// mapping over [0, size_).
inline bst_uint RowSet::operator[](size_t i) const {
  if (rows_.size() == 0) {
    return static_cast<bst_uint>(i);
  }
  return rows_[i];
}
// Number of rows in the set; tracked separately so the dense all-rows
// case never materializes rows_.
inline size_t RowSet::Size() const {
  return size_;
}
// Reset to the empty set (both the counter and any explicit indices).
inline void RowSet::Clear() {
  size_ = 0;
  rows_.clear();
}
/*!
 * \brief Append row index i to the set.
 *  While the set is dense (rows_ empty) and indices arrive contiguously,
 *  only the size counter is bumped; the first out-of-order index forces
 *  materialization of the explicit row list.
 */
inline void RowSet::PushBack(bst_uint i) {
  if (rows_.size() == 0) {
    if (i == size_) {
      // still contiguous: stay in the compact representation
      ++size_; return;
    } else {
      // leave the fast path: materialize [0, size_) explicitly
      rows_.resize(size_);
      // loop index renamed j: the old code shadowed parameter i (-Wshadow)
      for (size_t j = 0; j < size_; ++j) {
        rows_[j] = static_cast<bst_uint>(j);
      }
    }
  }
  rows_.push_back(i);
  ++size_;
}
// Serialize: row list first, then the size counter. Both fields are
// always written; Load() must consume the stream in the same order.
inline void RowSet::Save(dmlc::Stream* fo) const {
  fo->Write(rows_);
  fo->Write(&size_, sizeof(size_));
}
/*!
 * \brief Deserialize a RowSet written by Save().
 * \param fi The input stream.
 * \return true iff both fields were read successfully.
 *
 *  Save() unconditionally writes rows_ followed by size_, so both are
 *  consumed here. The previous code returned early when rows_ was
 *  non-empty, which left size_ stale (Size() would report the old
 *  value) and left the trailing size_ bytes unread, desynchronizing
 *  any subsequent reads from the same stream.
 */
inline bool RowSet::Load(dmlc::Stream* fi) {
  if (!fi->Read(&rows_)) return false;
  return fi->Read(&size_, sizeof(size_)) == sizeof(size_);
}
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
DMLC_DECLARE_TRAITS(has_saveload, xgboost::RowSet, true);
}
#endif // XGBOOST_DATA_H_
|
GB_unop__lnot_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lnot_uint64_uint64)
// op(A') function: GB (_unop_tran__lnot_uint64_uint64)
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !(Ax [p] != 0) for every entry present in A.
// Auto-generated from Generator/* -- do not edit by hand.
GrB_Info GB (_unop_apply__lnot_uint64_uint64)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // no bitmap: all anz values in Ax are entries
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            uint64_t z = aij ;
            Cx [p] = !(z != 0) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            uint64_t z = aij ;
            Cx [p] = !(z != 0) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (A'): transpose and apply, via the shared template in
// GB_unop_transpose.c (driven by the GB_CAST_OP macro defined above).
// Auto-generated from Generator/* -- do not edit by hand.
GrB_Info GB (_unop_tran__lnot_uint64_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class is used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ''fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ''classified'' and is assigned an unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#define MaxDimension 3
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f
/*
  Typedef declarations.
*/
/* Extent of one interval on a histogram axis (per color component). */
typedef struct _ExtentPacket
{
  double
    center;

  ssize_t
    index,   /* scan position, advanced by DefineRegion() */
    left,    /* leftmost histogram bin of the interval */
    right;   /* rightmost histogram bin of the interval */
} ExtentPacket;

/* One segmentation class: node of a singly-linked cluster list, with
   per-channel extents and a count of pixels assigned to the class. */
typedef struct _Cluster
{
  struct _Cluster
    *next;

  ExtentPacket
    red,
    green,
    blue;

  ssize_t
    count,
    id;
} Cluster;

/* Node of the scale-space interval tree built over the zero crossings
   of the smoothed histogram's second derivative. */
typedef struct _IntervalTree
{
  double
    tau;

  ssize_t
    left,
    right;

  double
    mean_stability,
    stability;

  struct _IntervalTree
    *sibling,
    *child;
} IntervalTree;

/* Histogram smoothed at scale tau together with the sign markers of
   its second-derivative zero crossings. */
typedef struct _ZeroCrossing
{
  double
    tau,
    histogram[256];

  short
    crossings[256];
} ZeroCrossing;
/*
Constant declarations.
*/
static const int
Blue = 2,
Green = 1,
Red = 0,
SafeMargin = 3,
TreeLength = 600;
/*
Method prototypes.
*/
static double
OptimalTau(const ssize_t *,const double,const double,const double,
const double,short *);
static ssize_t
DefineRegion(const short *,ExtentPacket *);
static void
FreeNodes(IntervalTree *),
InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
ScaleSpace(const ssize_t *,const double,double *),
ZeroCrossHistogram(double *,const double,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const double cluster_threshold,
% const double weighting_exponent,
% const MagickBooleanType verbose,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
const double cluster_threshold,
const double weighting_exponent,const MagickBooleanType verbose,
ExceptionInfo *exception)
{
#define SegmentImageTag "Segment/Image"
CacheView
*image_view;
Cluster
*cluster,
*head,
*last_cluster,
*next_cluster;
ExtentPacket
blue,
green,
red;
MagickOffsetType
progress;
double
*free_squares;
MagickStatusType
status;
register ssize_t
i;
register double
*squares;
size_t
number_clusters;
ssize_t
count,
y;
/*
Form clusters.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
(void) memset(&red,0,sizeof(red));
(void) memset(&green,0,sizeof(green));
(void) memset(&blue,0,sizeof(blue));
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireMagickMemory(
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
head=cluster;
}
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
}
}
}
if (head == (Cluster *) NULL)
{
/*
No classes were identified-- create one.
*/
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
head=cluster;
}
/*
Count the pixels for each cluster.
*/
status=MagickTrue;
count=0;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=(double) ScaleQuantumToChar(
GetPixelRed(image,p));
cluster->green.center+=(double) ScaleQuantumToChar(
GetPixelGreen(image,p));
cluster->blue.center+=(double) ScaleQuantumToChar(
GetPixelBlue(image,p));
cluster->count++;
break;
}
p+=GetPixelChannels(image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,2*
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/*
Remove clusters that do not meet minimum cluster threshold.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
number_clusters=(size_t) count;
if (verbose != MagickFalse)
{
/*
Print cluster statistics.
*/
(void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
(void) FormatLocaleFile(stdout,"===================\n\n");
(void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
cluster_threshold);
(void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
weighting_exponent);
(void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
(double) number_clusters);
/*
Print the total number of points per cluster.
*/
(void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
(void) FormatLocaleFile(stdout,"=============================\n\n");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
(void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
cluster->id,(double) cluster->count);
/*
Print the cluster extents.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,
"%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
cluster->red.left,(double) cluster->red.right,(double)
cluster->green.left,(double) cluster->green.right,(double)
cluster->blue.left,(double) cluster->blue.right);
}
/*
Print the cluster center values.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"=====================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
cluster->red.center,(double) cluster->green.center,(double)
cluster->blue.center);
}
(void) FormatLocaleFile(stdout,"\n");
}
if (number_clusters > 256)
ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
/*
Speed up distance calculations.
*/
squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
if (squares == (double *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
squares+=255;
for (i=(-255); i <= 255; i++)
squares[i]=(double) i*(double) i;
/*
Allocate image colormap.
*/
if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
i=0;
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
(cluster->red.center+0.5));
image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
(cluster->green.center+0.5));
image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
(cluster->blue.center+0.5));
i++;
}
/*
Do course grain classes.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Cluster
*clust;
register const PixelInfo
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,0,q);
for (clust=head; clust != (Cluster *) NULL; clust=clust->next)
{
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >=
(clust->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <=
(clust->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >=
(clust->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <=
(clust->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >=
(clust->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <=
(clust->blue.right+SafeMargin)))
{
/*
Classify this pixel.
*/
SetPixelIndex(image,(Quantum) clust->id,q);
break;
}
}
if (clust == (Cluster *) NULL)
{
double
distance_squared,
local_minima,
numerator,
ratio,
sum;
register ssize_t
j,
k;
/*
Compute fuzzy membership.
*/
local_minima=0.0;
for (j=0; j < (ssize_t) image->colors; j++)
{
sum=0.0;
p=image->colormap+j;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(
GetPixelRed(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t)
ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t)
ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->blue))];
numerator=distance_squared;
for (k=0; k < (ssize_t) image->colors; k++)
{
p=image->colormap+k;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(
GetPixelRed(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[
(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[
(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->blue))];
ratio=numerator/distance_squared;
sum+=SegmentPower(ratio);
}
if ((sum != 0.0) && ((1.0/sum) > local_minima))
{
/*
Classify this pixel.
*/
local_minima=1.0/sum;
SetPixelIndex(image,(Quantum) j,q);
}
}
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,
2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status&=SyncImage(image,exception);
/*
Relinquish resources.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
squares-=255;
free_squares=squares;
free_squares=(double *) RelinquishMagickMemory(free_squares);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
/*
  ConsolidateCrossings() adjusts the zero-crossing lists so that an even
  number of crossings always lies between two crossings at adjacent scales,
  working from the coarsest scale (highest index) down to the finest.
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.  left/right bracket j with the nearest crossings at
        the next-coarser scale (i+1).
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j (at the current scale i).
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      /* NOTE(review): k cannot go below 0 here (loop stops at k > 0),
         so this clamp is unreachable; retained as written. */
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /*
        Move the crossing to the chosen position (or drop it when no
        position preserves the even-count invariant).
      */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
%    o extents: This pointer to an ExtentPacket represents the extents
%      of a particular peak or valley of a color component.
%
*/
/*
  DefineRegion() locates the next peak region in `extrema', resuming the
  scan at extents->index.  On success the region's [left,right] bounds are
  stored in `extents' and MagickTrue is returned; MagickFalse means the
  histogram is exhausted and no further region exists.
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Reset the extent to cover the whole histogram by default.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Advance to the next maximum (positive entry): the region's left edge.
  */
  while ((extents->index <= 255) && (extrema[extents->index] <= 0))
    extents->index++;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Advance to the next minimum (negative entry): one past the right edge.
  */
  while ((extents->index <= 255) && (extrema[extents->index] >= 0))
    extents->index++;
  extents->right=extents->index-1;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const double *histogram,
% double *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of doubles is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
/*
  DerivativeHistogram() writes the first derivative of the 256-bin
  `histogram' into `derivative', using central differencing for interior
  bins and second-order one-sided polynomial interpolation at the two
  endpoints.
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  const ssize_t
    last = 255;

  ssize_t
    bin;

  /*
    Endpoints: one-sided second-order differences.
  */
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[last]=(0.5*histogram[last-2]-2.0*histogram[last-1]+
    1.5*histogram[last]);
  /*
    Interior bins: central differencing.
  */
  for (bin=1; bin < last; bin++)
    derivative[bin]=(histogram[bin+1]-histogram[bin-1])/2.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDynamicThreshold() computes a dynamic threshold for `image' by
  clustering the RGB histograms, then averaging the centers of the largest
  (background) and smallest (object) clusters per channel into `pixel'.
  Returns MagickTrue on success, MagickFalse on allocation failure.
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  double
    threshold;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /* was sizeof(**histogram): over-allocated ssize_t-sized slots for a
       short array */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram and locate per-channel extrema at the optimal tau.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /*
              Free the partially built cluster list and the working arrays
              (previously both leaked on this path).
            */
            for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
            {
              next_cluster=cluster->next;
              cluster=(Cluster *) RelinquishMagickMemory(cluster);
            }
            for (i=0; i < MaxDimension; i++)
            {
              extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
              histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
            }
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified -- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          /*
            Free the working arrays (previously leaked on this path).
          */
          for (i=0; i < MaxDimension; i++)
          {
            extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
            histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
          }
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest surviving cluster as the object and the largest as
    the background; with a single cluster both are the head.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      /*
        The threshold per channel is the midpoint of the background and
        object cluster centers.
      */
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
/*
  InitializeHistogram() tallies 256-bin histograms of the red, green, and
  blue channels of `image' into histogram[Red], histogram[Green], and
  histogram[Blue] respectively.
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const Quantum
    *p;

  register ssize_t
    bin,
    x;

  ssize_t
    y;

  /*
    Clear every bin of each channel histogram.
  */
  for (bin=0; bin <= 255; bin++)
  {
    histogram[Red][bin]=0;
    histogram[Green][bin]=0;
    histogram[Blue][bin]=0;
  }
  /*
    Accumulate one count per pixel per channel, scaled to 0..255.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p))]++;
      p+=GetPixelChannels(image);
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
% The format of the InitializeIntervalTree method is:
%
%      InitializeIntervalTree(const ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
/*
  InitializeList() appends every childless (leaf) node of the interval
  (sub)tree rooted at `node' to the flat array `list', advancing
  *number_nodes; siblings are visited before children.
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node != (IntervalTree *) NULL)
    {
      if (node->child == (IntervalTree *) NULL)
        list[(*number_nodes)++]=node;
      InitializeList(list,number_nodes,node->sibling);
      InitializeList(list,number_nodes,node->child);
    }
}
/*
  MeanStability() sets each node's mean_stability to the arithmetic mean of
  its children's stability values (0.0 for a childless node), recursing
  over the whole (sub)tree via sibling and child links.
*/
static void MeanStability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  if (node->child != (IntervalTree *) NULL)
    {
      register IntervalTree
        *scan;

      register ssize_t
        children;

      register double
        total;

      total=0.0;
      children=0;
      for (scan=node->child; scan != (IntervalTree *) NULL; scan=scan->sibling)
      {
        total+=scan->stability;
        children++;
      }
      node->mean_stability=total/(double) children;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}
/*
  Stability() sets each node's stability to the difference between its tau
  and its first child's tau (0.0 when the node is childless), recursing
  over the whole (sub)tree.
*/
static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  node->stability=(node->child == (IntervalTree *) NULL) ? 0.0 :
    node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}
/*
  InitializeIntervalTree() builds an interval tree from the per-scale
  zero-crossing lists: the root spans the whole histogram [0,255] and each
  scale's crossings split the current leaf intervals into children.
  Returns NULL on allocation failure (partially built nodes are freed).
*/
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /* i starts at -1 so the first pass uses zero_crossing[0]. */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
      NOTE(review): InitializeList assumes the leaf count never exceeds
      TreeLength (600) — confirm this bound holds for all inputs.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list: each crossing inside a leaf interval starts a new child.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            /* First child hangs off head->child; later ones chain as
               siblings. */
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /* Close the final sub-interval when at least one split occurred. */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% double OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
/*
  ActiveNodes() collects the "active" nodes of the interval tree: a node
  whose stability is at least the mean stability of its children is
  appended to `list' and its subtree is pruned; otherwise the search
  descends into the children.  Siblings are always scanned.
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability < node->mean_stability)
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
      return;
    }
  list[(*number_nodes)++]=node;
  ActiveNodes(list,number_nodes,node->sibling);
}
/*
  FreeNodes() releases an interval (sub)tree in post-order: siblings and
  children first, then the node itself.
*/
static void FreeNodes(IntervalTree *node)
{
  if (node != (IntervalTree *) NULL)
    {
      FreeNodes(node->sibling);
      FreeNodes(node->child);
      node=(IntervalTree *) RelinquishMagickMemory(node);
    }
}
/*
  OptimalTau() scans scale space from max_tau down to min_tau, records the
  zero crossings of the histogram's second derivative at each scale, builds
  an interval tree from them, and writes the peak/valley extrema of the
  active nodes into `extrema' (positive entries mark peaks, negative mark
  valleys).  Returns the average tau of the active nodes, or 0.0 on
  allocation failure or when no active node exists.
*/
static double OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  double
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  register ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list: one entry per scale plus the original
    histogram.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);
  /*
    Initialize zero crossing list: smooth the histogram at each tau and
    mark where its second derivative changes sign.
  */
  derivative=(double *) AcquireCriticalMemory(256*sizeof(*derivative));
  second_derivative=(double *) AcquireCriticalMemory(256*
    sizeof(*second_derivative));
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original (unsmoothed) histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(double) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(double *) RelinquishMagickMemory(derivative);
  second_derivative=(double *) RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes: stability is greater (or equal) to the mean stability
    of its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak (or valley, when the right crossing is not
      a falling edge).
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;  /* avoid encoding extremum 0 as "no extremum" */
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.  Guard the division: with no active nodes the
    old code computed 0.0/0 (NaN); return 0.0 instead.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  average_tau=(number_nodes == 0) ? 0.0 : average_tau/(double) number_nodes;
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const double tau,
% double *scale_histogram)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    exponent,
    *kernel,
    normalization;

  ssize_t
    delta,
    v,
    x;

  /*
    Smooth the 256-bin histogram at scale tau by convolving it with a
    Gaussian kernel: scale_histogram[x] = alpha*sum_u histogram[u]*G(|x-u|).
  */
  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  normalization=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  exponent=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  for (delta=0; delta <= 255; delta++)
    kernel[delta]=0.0;
  for (delta=0; delta <= 255; delta++)
  {
    kernel[delta]=exp((double) exponent*delta*delta);
    if (kernel[delta] < MagickEpsilon)
      break;  /* tail is negligible; the remaining entries stay zero */
  }
  for (x=0; x <= 255; x++)
  {
    double
      sum;

    sum=0.0;
    for (v=0; v <= 255; v++)
      sum+=(double) histogram[v]*kernel[MagickAbsoluteValue(x-v)];
    scale_histogram[x]=normalization*sum;
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SegmentImage() segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Release everything allocated so far -- including whichever of the
          two arrays at the *current* index did succeed.  The previous code
          started the cleanup loop at i-1 and leaked that allocation;
          RelinquishMagickMemory() ignores a NULL argument, so freeing
          index i unconditionally is safe.
        */
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  /*
    Pick the optimal scale tau for each channel; a zero smoothing threshold
    falls back to 1.0 so the second derivative is still denoised.
  */
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique, then restore the image's
    original colorspace.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroCrossHistogram() finds the zero crossings in a histogram and marks
% directions as: 1 is negative to positive; 0 is zero crossing; and -1
% is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(double *second_derivative,
% const double smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of doubles representing the
% second derivative of the histogram of a particular color component.
%
% o crossings: This array of integers is initialized with
% -1, 0, or 1 representing the slope of the first derivative of the
% histogram of a particular color component.
%
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  ssize_t
    bin,
    last_sign;

  /*
    Clamp second-derivative values inside [-smooth_threshold,
    smooth_threshold) to zero so low-amplitude noise cannot produce
    spurious sign transitions.  Note: this modifies the caller's array.
  */
  for (bin=0; bin <= 255; bin++)
    if ((second_derivative[bin] < smooth_threshold) &&
        (second_derivative[bin] >= -smooth_threshold))
      second_derivative[bin]=0.0;
  /*
    Scan the bins and record sign transitions in crossings[]; zeroed bins
    leave last_sign untouched so they are transparent to the scan.
  */
  last_sign=0;
  for (bin=0; bin <= 255; bin++)
  {
    double
      value;

    value=second_derivative[bin];
    crossings[bin]=0;
    if (value < 0.0)
      {
        if (last_sign > 0)
          crossings[bin]=(-1);
        last_sign=1;
      }
    else
      if (value > 0.0)
        {
          if (last_sign < 0)
            crossings[bin]=1;
          last_sign=(-1);
        }
  }
}
|
GB_unop__ceil_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ceil_fc64_fc64)
// op(A') function: GB (_unop_tran__ceil_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_cceil (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cceil (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_cceil (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CEIL || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the complex-ceil unary operator entrywise: Cx [p] = cceil (Ax [p])
// for all anz entries, in parallel.  Auto-generated; do not edit by hand.
GrB_Info GB (_unop_apply__ceil_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads
)
{
    #if GB_DISABLE
    // operator compiled out (GB_DISABLE): caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // A is not bitmap: every one of the anz positions holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;        // cast is FC64 -> FC64 (identity)
            Cx [p] = GB_cceil (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // position p holds no entry; skip
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cceil (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply complex-ceil to each entry.
// The actual work is in the shared template GB_unop_transpose.c, which
// expands using the GB_CAST_OP macro defined earlier in this file.
GrB_Info GB (_unop_tran__ceil_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // per-task workspaces (template-defined)
    const int64_t *restrict A_slice, // task partition of A (template-defined)
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out (GB_DISABLE): caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
hopscotch_hash.h | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/**
* Hopscotch hash is modified from the code downloaded from
* https://sites.google.com/site/cconcurrencypackage/hopscotch-hashing
* with the following terms of usage
*/
////////////////////////////////////////////////////////////////////////////////
//TERMS OF USAGE
//------------------------------------------------------------------------------
//
// Permission to use, copy, modify and distribute this software and
// its documentation for any purpose is hereby granted without fee,
// provided that due acknowledgments to the authors are provided and
// this permission notice appears in all copies of the software.
// The software is provided "as is". There is no warranty of any kind.
//
//Authors:
// Maurice Herlihy
// Brown University
// and
// Nir Shavit
// Tel-Aviv University
// and
// Moran Tzafrir
// Tel-Aviv University
//
// Date: July 15, 2008.
//
////////////////////////////////////////////////////////////////////////////////
// Programmer : Moran Tzafrir (MoranTza@gmail.com)
// Modified : Jongsoo Park (jongsoo.park@intel.com)
// Oct 1, 2015.
//
////////////////////////////////////////////////////////////////////////////////
#ifndef hypre_HOPSCOTCH_HASH_HEADER
#define hypre_HOPSCOTCH_HASH_HEADER
//#include <strings.h>
#include <string.h>
#include <stdio.h>
#include <limits.h>
#include <math.h>
#ifdef HYPRE_USING_OPENMP
#include <omp.h>
#endif
#include "_hypre_utilities.h"
// Potentially architecture specific features used here:
// __sync_val_compare_and_swap
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************
* This next section of code is here instead of in _hypre_utilities.h to get
* around some portability issues with Visual Studio. By putting it here, we
* can explicitly include this '.h' file in a few files in hypre and compile
* them with C++ instead of C (VS does not support C99 'inline').
******************************************************************************/
#ifdef HYPRE_USING_ATOMIC
/* Atomic compare-and-swap: if *ptr == oldval, store newval.  Returns the
   value observed in *ptr before the operation (== oldval on success). */
static inline HYPRE_Int
hypre_compare_and_swap( HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval )
{
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
   /* GCC >= 4.1 legacy __sync builtin (full memory barrier) */
   return __sync_val_compare_and_swap(ptr, oldval, newval);
   /* NOTE(review): if this #if does not match, the function falls off the
      end without a return (undefined behavior) -- confirm that
      HYPRE_USING_ATOMIC is only defined for compilers covered here. */
   //#elif defined _MSC_VER
   //return _InterlockedCompareExchange((long *)ptr, newval, oldval);
   //#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
   // JSP: not many compilers have implemented this, so comment out for now
   //_Atomic HYPRE_Int *atomic_ptr = ptr;
   //atomic_compare_exchange_strong(atomic_ptr, &oldval, newval);
   //return oldval;
#endif
}
/* Atomic fetch-and-add: *ptr += value.  Returns the value of *ptr from
   before the addition. */
static inline HYPRE_Int
hypre_fetch_and_add( HYPRE_Int *ptr, HYPRE_Int value )
{
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
   /* GCC >= 4.1 legacy __sync builtin (full memory barrier) */
   return __sync_fetch_and_add(ptr, value);
   /* NOTE(review): no return on unmatched compilers -- see
      hypre_compare_and_swap above. */
   //#elif defined _MSC_VER
   //return _InterlockedExchangeAdd((long *)ptr, value);
   //#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
   // JSP: not many compilers have implemented this, so comment out for now
   //_Atomic HYPRE_Int *atomic_ptr = ptr;
   //return atomic_fetch_add(atomic_ptr, value);
#endif
}
#else // !HYPRE_USING_ATOMIC
/* Serial (non-atomic) compare-and-swap fallback: if *ptr == oldval, store
   newval.  Returns the value observed in *ptr before the operation. */
static inline HYPRE_Int
hypre_compare_and_swap( HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval )
{
   HYPRE_Int observed = *ptr;

   if (observed == oldval)
   {
      *ptr = newval;
   }
   return observed;
}
/* Serial (non-atomic) fetch-and-add fallback: *ptr += value.  Returns the
   pre-addition value of *ptr. */
static inline HYPRE_Int
hypre_fetch_and_add( HYPRE_Int *ptr, HYPRE_Int value )
{
   HYPRE_Int previous = *ptr;

   *ptr = previous + value;
   return previous;
}
#endif // !HYPRE_USING_ATOMIC
/******************************************************************************/
// Constants ................................................................
#define HYPRE_HOPSCOTCH_HASH_HOP_RANGE (32)
#define HYPRE_HOPSCOTCH_HASH_INSERT_RANGE (4*1024)
#define HYPRE_HOPSCOTCH_HASH_EMPTY (0)
#define HYPRE_HOPSCOTCH_HASH_BUSY (1)
// Small Utilities ..........................................................
/* Zero-based index of the least-significant set bit of x, or -1 when
   x == 0 (matches ffs(x) - 1). */
static inline HYPRE_Int
first_lsb_bit_indx( hypre_uint x )
{
   HYPRE_Int pos;

#if defined(_MSC_VER) || defined(__MINGW64__)
   /* no ffs() on these toolchains: shift until the low bit is set */
   pos = 0;
   if (x != 0)
   {
      pos = 1;
      while ((x & 1) == 0)
      {
         x >>= 1;
         ++pos;
      }
   }
#else
   pos = ffs(x);  /* 1-based index, 0 when x == 0 */
#endif
   return (pos - 1);
}
/**
* hypre_Hash is adapted from xxHash with the following license.
*/
/*
xxHash - Extremely Fast Hash algorithm
Header File
Copyright (C) 2012-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- xxHash source repository : https://github.com/Cyan4973/xxHash
*/
/***************************************
* Constants
***************************************/
#define HYPRE_XXH_PRIME32_1 2654435761U
#define HYPRE_XXH_PRIME32_2 2246822519U
#define HYPRE_XXH_PRIME32_3 3266489917U
#define HYPRE_XXH_PRIME32_4 668265263U
#define HYPRE_XXH_PRIME32_5 374761393U
#define HYPRE_XXH_PRIME64_1 11400714785074694791ULL
#define HYPRE_XXH_PRIME64_2 14029467366897019727ULL
#define HYPRE_XXH_PRIME64_3 1609587929392839161ULL
#define HYPRE_XXH_PRIME64_4 9650029242287828579ULL
#define HYPRE_XXH_PRIME64_5 2870177450012600261ULL
#define HYPRE_XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#define HYPRE_XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#if defined(HYPRE_MIXEDINT) || defined(HYPRE_BIGINT)
/* Hash one HYPRE_BigInt key with a single round of the xxHash64 mixing
   steps (see the xxHash notice above).  The result must never equal
   HYPRE_HOPSCOTCH_HASH_EMPTY (0), which marks an empty bucket; debug
   builds check for that collision. */
static inline HYPRE_BigInt
hypre_BigHash( HYPRE_BigInt input )
{
   hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input);

   /* one 8-byte lane: multiply, rotate, multiply, fold into the state */
   hypre_ulongint k1 = input;
   k1 *= HYPRE_XXH_PRIME64_2;
   k1 = HYPRE_XXH_rotl64(k1, 31);
   k1 *= HYPRE_XXH_PRIME64_1;
   h64 ^= k1;
   h64 = HYPRE_XXH_rotl64(h64, 27) * HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4;

   /* final avalanche: spread entropy across all bits */
   h64 ^= h64 >> 33;
   h64 *= HYPRE_XXH_PRIME64_2;
   h64 ^= h64 >> 29;
   h64 *= HYPRE_XXH_PRIME64_3;
   h64 ^= h64 >> 32;

#ifndef NDEBUG
   if (HYPRE_HOPSCOTCH_HASH_EMPTY == h64)
   {
      hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY);
      hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64);
   }
#endif
   return h64;
}
#else
/* 32-bit variant of hypre_BigHash (plain-int builds): one round of the
   xxHash32 mixing steps.  Must not return HYPRE_HOPSCOTCH_HASH_EMPTY. */
static inline HYPRE_Int
hypre_BigHash(HYPRE_Int input)
{
   hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input);

   // 1665863975 is added to input so that
   // only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY.
   // Hence, we're fine as long as key is non-negative.
   h32 += (input + 1665863975) * HYPRE_XXH_PRIME32_3;
   h32 = HYPRE_XXH_rotl32(h32, 17) * HYPRE_XXH_PRIME32_4;

   /* final avalanche */
   h32 ^= h32 >> 15;
   h32 *= HYPRE_XXH_PRIME32_2;
   h32 ^= h32 >> 13;
   h32 *= HYPRE_XXH_PRIME32_3;
   h32 ^= h32 >> 16;
   //hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32);
   return h32;
}
#endif
#ifdef HYPRE_BIGINT
/* Hash one HYPRE_Int key (big-int builds) with a single round of the
   xxHash64 mixing steps.  Must never return HYPRE_HOPSCOTCH_HASH_EMPTY
   (0); debug builds check for that collision. */
static inline HYPRE_Int
hypre_Hash(HYPRE_Int input)
{
   hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input);

   /* one 8-byte lane: multiply, rotate, multiply, fold into the state */
   hypre_ulongint k1 = input;
   k1 *= HYPRE_XXH_PRIME64_2;
   k1 = HYPRE_XXH_rotl64(k1, 31);
   k1 *= HYPRE_XXH_PRIME64_1;
   h64 ^= k1;
   h64 = HYPRE_XXH_rotl64(h64, 27) * HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4;

   /* final avalanche */
   h64 ^= h64 >> 33;
   h64 *= HYPRE_XXH_PRIME64_2;
   h64 ^= h64 >> 29;
   h64 *= HYPRE_XXH_PRIME64_3;
   h64 ^= h64 >> 32;

#ifndef NDEBUG
   if (HYPRE_HOPSCOTCH_HASH_EMPTY == h64)
   {
      hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY);
      hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64);
   }
#endif
   return h64;
}
#else
/* Hash one HYPRE_Int key (plain-int builds): one round of the xxHash32
   mixing steps.  Must not return HYPRE_HOPSCOTCH_HASH_EMPTY. */
static inline HYPRE_Int
hypre_Hash(HYPRE_Int input)
{
   hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input);

   // 1665863975 is added to input so that
   // only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY.
   // Hence, we're fine as long as key is non-negative.
   h32 += (input + 1665863975) * HYPRE_XXH_PRIME32_3;
   h32 = HYPRE_XXH_rotl32(h32, 17) * HYPRE_XXH_PRIME32_4;

   /* final avalanche */
   h32 ^= h32 >> 15;
   h32 *= HYPRE_XXH_PRIME32_2;
   h32 ^= h32 >> 13;
   h32 *= HYPRE_XXH_PRIME32_3;
   h32 ^= h32 >> 16;
   //hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32);
   return h32;
}
#endif
/* Hopscotch displacement for the int set: try to move an element that lives
   within the hop range of an earlier bucket into the empty slot *free_bucket,
   thereby pulling the empty slot closer to the insertion's home bucket.
   On success *free_bucket/*free_dist are updated to the new, closer slot;
   on failure *free_bucket is set to -1 and *free_dist to 0. */
static inline void
hypre_UnorderedIntSetFindCloserFreeBucket( hypre_UnorderedIntSet *s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                           hypre_HopscotchSegment *start_seg,
#endif
                                           HYPRE_Int *free_bucket,
                                           HYPRE_Int *free_dist )
{
   /* examine candidate home buckets from HOP_RANGE-1 slots before the free
      slot, moving forward */
   HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      hypre_uint start_hop_info = s->hopInfo[move_bucket];
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      /* first occupied slot of move_bucket's neighborhood that precedes the
         free slot; only such a slot can be relocated into it */
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         /* lock the candidate's segment unless we already hold it */
         hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]);
         if (start_seg != move_segment)
         {
            omp_set_lock(&move_segment->lock);
         }
#endif
         /* re-check under the lock that the neighborhood did not change */
         if (start_hop_info == s->hopInfo[move_bucket])
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist;
            s->key[*free_bucket] = s->key[new_free_bucket];
            s->hash[*free_bucket] = s->hash[new_free_bucket];
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            /* bump the timestamp so concurrent readers can detect the move */
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            /* the moved element now sits move_free_dist from its home */
            s->hopInfo[move_bucket] |= (1U << move_free_dist);
            s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist);
            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if (start_seg != move_segment)
            {
               omp_unset_lock(&move_segment->lock);
            }
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if (start_seg != move_segment)
         {
            omp_unset_lock(&move_segment->lock);
         }
#endif
      }
      ++move_bucket;
   }
   /* no displaceable element found: signal failure to the caller */
   *free_bucket = -1;
   *free_dist = 0;
}
/* Hopscotch displacement for the big-int set; identical algorithm to
   hypre_UnorderedIntSetFindCloserFreeBucket but operating on a
   hypre_UnorderedBigIntSet.  On success *free_bucket/*free_dist are moved
   closer to the home bucket; on failure *free_bucket = -1, *free_dist = 0. */
static inline void
hypre_UnorderedBigIntSetFindCloserFreeBucket( hypre_UnorderedBigIntSet *s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                              hypre_HopscotchSegment *start_seg,
#endif
                                              HYPRE_Int *free_bucket,
                                              HYPRE_Int *free_dist )
{
   /* candidate home buckets start HOP_RANGE-1 slots before the free slot */
   HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      hypre_uint start_hop_info = s->hopInfo[move_bucket];
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      /* earliest occupied neighborhood slot that precedes the free slot */
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         /* lock the candidate's segment unless we already hold it */
         hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]);
         if (start_seg != move_segment)
         {
            omp_set_lock(&move_segment->lock);
         }
#endif
         /* re-check under the lock that the neighborhood did not change */
         if (start_hop_info == s->hopInfo[move_bucket])
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist;
            s->key[*free_bucket] = s->key[new_free_bucket];
            s->hash[*free_bucket] = s->hash[new_free_bucket];
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            /* let concurrent readers detect the relocation */
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            s->hopInfo[move_bucket] |= (1U << move_free_dist);
            s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist);
            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if (start_seg != move_segment)
            {
               omp_unset_lock(&move_segment->lock);
            }
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if (start_seg != move_segment)
         {
            omp_unset_lock(&move_segment->lock);
         }
#endif
      }
      ++move_bucket;
   }
   /* no displaceable element found */
   *free_bucket = -1;
   *free_dist = 0;
}
/* Hopscotch displacement for the int map.  Unlike the set versions this
   works with bucket *pointers* into m->table and also moves the stored
   data field.  On success *free_bucket/*free_dist are moved closer to the
   home bucket; on failure *free_bucket = NULL, *free_dist = 0. */
static inline void
hypre_UnorderedIntMapFindCloserFreeBucket( hypre_UnorderedIntMap *m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                           hypre_HopscotchSegment *start_seg,
#endif
                                           hypre_HopscotchBucket **free_bucket,
                                           HYPRE_Int *free_dist)
{
   /* candidate home buckets start HOP_RANGE-1 slots before the free slot */
   hypre_HopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      hypre_uint start_hop_info = move_bucket->hopInfo;
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      /* earliest occupied neighborhood slot that precedes the free slot */
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         /* segment index comes from the bucket's offset within the table */
         hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]);
         if (start_seg != move_segment)
         {
            omp_set_lock(&move_segment->lock);
         }
#endif
         /* re-check under the lock that the neighborhood did not change */
         if (start_hop_info == move_bucket->hopInfo)
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            hypre_HopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist;
            (*free_bucket)->data = new_free_bucket->data;
            (*free_bucket)->key = new_free_bucket->key;
            (*free_bucket)->hash = new_free_bucket->hash;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            /* let concurrent readers detect the relocation */
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            move_bucket->hopInfo |= (1U << move_free_dist);
            move_bucket->hopInfo &= ~(1U << move_new_free_dist);
            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if (start_seg != move_segment)
            {
               omp_unset_lock(&move_segment->lock);
            }
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if (start_seg != move_segment)
         {
            omp_unset_lock(&move_segment->lock);
         }
#endif
      }
      ++move_bucket;
   }
   /* no displaceable element found */
   *free_bucket = NULL;
   *free_dist = 0;
}
/* Hopscotch displacement for the big-int map; same algorithm as the int
   map version but over hypre_BigHopscotchBucket.  On success
   *free_bucket/*free_dist are moved closer to the home bucket; on failure
   *free_bucket = NULL, *free_dist = 0. */
static inline void
hypre_UnorderedBigIntMapFindCloserFreeBucket( hypre_UnorderedBigIntMap *m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                              hypre_HopscotchSegment *start_seg,
#endif
                                              hypre_BigHopscotchBucket **free_bucket,
                                              HYPRE_Int *free_dist)
{
   /* candidate home buckets start HOP_RANGE-1 slots before the free slot */
   hypre_BigHopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      hypre_uint start_hop_info = move_bucket->hopInfo;
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      /* earliest occupied neighborhood slot that precedes the free slot */
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         /* segment index comes from the bucket's offset within the table */
         hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]);
         if (start_seg != move_segment)
         {
            omp_set_lock(&move_segment->lock);
         }
#endif
         /* re-check under the lock that the neighborhood did not change */
         if (start_hop_info == move_bucket->hopInfo)
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            hypre_BigHopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist;
            (*free_bucket)->data = new_free_bucket->data;
            (*free_bucket)->key = new_free_bucket->key;
            (*free_bucket)->hash = new_free_bucket->hash;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            /* let concurrent readers detect the relocation */
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            move_bucket->hopInfo |= (1U << move_free_dist);
            move_bucket->hopInfo &= ~(1U << move_new_free_dist);
            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if (start_seg != move_segment)
            {
               omp_unset_lock(&move_segment->lock);
            }
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if (start_seg != move_segment)
         {
            omp_unset_lock(&move_segment->lock);
         }
#endif
      }
      ++move_bucket;
   }
   /* no displaceable element found */
   *free_bucket = NULL;
   *free_dist = 0;
}
void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedBigIntSetCreate( hypre_UnorderedBigIntSet *s,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedBigIntMapCreate( hypre_UnorderedBigIntMap *m,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s );
void hypre_UnorderedBigIntSetDestroy( hypre_UnorderedBigIntSet *s );
void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m );
void hypre_UnorderedBigIntMapDestroy( hypre_UnorderedBigIntMap *m );
// Query Operations .........................................................
/* Membership test: returns 1 if key is in the set, 0 otherwise.
   Lock-free read; in concurrent builds a changed segment timestamp
   triggers a full hop-range rescan to tolerate in-flight displacements. */
static inline HYPRE_Int
hypre_UnorderedIntSetContains( hypre_UnorderedIntSet *s,
                               HYPRE_Int key )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif
   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
#endif
   HYPRE_Int bucket = hash & s->bucketMask;
   hypre_uint hopInfo = s->hopInfo[bucket];

   /* fast paths: empty neighborhood, or only the home bucket occupied */
   if (0 == hopInfo)
   {
      return 0;
   }
   else if (1 == hopInfo )
   {
      if (hash == s->hash[bucket] && key == s->key[bucket])
      {
         return 1;
      }
      else { return 0; }
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* remember the segment timestamp so we can detect concurrent moves */
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   /* visit each occupied slot of the home bucket's neighborhood bitmap */
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;
      if (hash == s->hash[currElm] && key == s->key[currElm])
      {
         return 1;
      }
      hopInfo &= ~(1U << i);
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* nothing moved while we scanned, so the miss is definitive */
   if (segment->timestamp == startTimestamp)
   {
      return 0;
   }
#endif
   /* a concurrent displacement may have raced us: scan the whole range */
   HYPRE_Int i;
   for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i)
   {
      if (hash == s->hash[bucket + i] && key == s->key[bucket + i])
      {
         return 1;
      }
   }
   return 0;
}
/* Membership test for the big-int set: returns 1 if key is present, else 0.
   Same lock-free protocol as hypre_UnorderedIntSetContains. */
static inline HYPRE_Int
hypre_UnorderedBigIntSetContains( hypre_UnorderedBigIntSet *s,
                                  HYPRE_BigInt key )
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif
   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &s->segments[(HYPRE_Int)(hash & s->segmentMask)];
#endif
   HYPRE_Int bucket = (HYPRE_Int)(hash & s->bucketMask);
   hypre_uint hopInfo = s->hopInfo[bucket];

   /* fast paths: empty neighborhood, or only the home bucket occupied */
   if (0 == hopInfo)
   {
      return 0;
   }
   else if (1 == hopInfo )
   {
      if (hash == s->hash[bucket] && key == s->key[bucket])
      {
         return 1;
      }
      else { return 0; }
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* remember the segment timestamp so we can detect concurrent moves */
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   /* visit each occupied slot of the home bucket's neighborhood bitmap */
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;
      if (hash == s->hash[currElm] && key == s->key[currElm])
      {
         return 1;
      }
      hopInfo &= ~(1U << i);
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* nothing moved while we scanned, so the miss is definitive */
   if (segment->timestamp == startTimestamp)
   {
      return 0;
   }
#endif
   /* a concurrent displacement may have raced us: scan the whole range */
   HYPRE_Int i;
   for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i)
   {
      if (hash == s->hash[bucket + i] && key == s->key[bucket + i])
      {
         return 1;
      }
   }
   return 0;
}
/**
 * Look up key in the map and return its associated data value.
 *
 * @return -1 if the key doesn't exist (note: -1 is therefore not usable
 *         as a stored data value).  Lock-free read; in concurrent builds a
 *         changed segment timestamp triggers a full hop-range rescan.
 */
static inline HYPRE_Int
hypre_UnorderedIntMapGet( hypre_UnorderedIntMap *m,
                          HYPRE_Int key )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif
   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
#endif
   hypre_HopscotchBucket *elmAry = &(m->table[hash & m->bucketMask]);
   hypre_uint hopInfo = elmAry->hopInfo;

   /* fast paths: empty neighborhood, or only the home bucket occupied */
   if (0 == hopInfo)
   {
      return -1;
   }
   else if (1 == hopInfo )
   {
      if (hash == elmAry->hash && key == elmAry->key)
      {
         return elmAry->data;
      }
      else { return -1; }
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* remember the segment timestamp so we can detect concurrent moves */
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   /* visit each occupied slot of the home bucket's neighborhood bitmap */
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_HopscotchBucket* currElm = elmAry + i;
      if (hash == currElm->hash && key == currElm->key)
      {
         return currElm->data;
      }
      hopInfo &= ~(1U << i);
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* nothing moved while we scanned, so the miss is definitive */
   if (segment->timestamp == startTimestamp)
   {
      return -1;
   }
#endif
   /* a concurrent displacement may have raced us: scan the whole range */
   hypre_HopscotchBucket *currBucket = &(m->table[hash & m->bucketMask]);
   HYPRE_Int i;
   for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket)
   {
      if (hash == currBucket->hash && key == currBucket->key)
      {
         return currBucket->data;
      }
   }
   return -1;
}
/* Look up key in the big-int map and return its data value, or -1 when the
   key is absent.  Same lock-free protocol as hypre_UnorderedIntMapGet. */
static inline
HYPRE_Int hypre_UnorderedBigIntMapGet( hypre_UnorderedBigIntMap *m,
                                       HYPRE_BigInt key )
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif
   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &m->segments[(HYPRE_Int)(hash & m->segmentMask)];
#endif
   hypre_BigHopscotchBucket *elmAry = &(m->table[(HYPRE_Int)(hash & m->bucketMask)]);
   hypre_uint hopInfo = elmAry->hopInfo;

   /* fast paths: empty neighborhood, or only the home bucket occupied */
   if (0 == hopInfo)
   {
      return -1;
   }
   else if (1 == hopInfo )
   {
      if (hash == elmAry->hash && key == elmAry->key)
      {
         return elmAry->data;
      }
      else { return -1; }
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* remember the segment timestamp so we can detect concurrent moves */
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   /* visit each occupied slot of the home bucket's neighborhood bitmap */
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_BigHopscotchBucket* currElm = elmAry + i;
      if (hash == currElm->hash && key == currElm->key)
      {
         return currElm->data;
      }
      hopInfo &= ~(1U << i);
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* nothing moved while we scanned, so the miss is definitive */
   if (segment->timestamp == startTimestamp)
   {
      return -1;
   }
#endif
   /* a concurrent displacement may have raced us: scan the whole range.
      NOTE(review): this index lacks the (HYPRE_Int) cast used above when
      computing elmAry -- confirm intentional for mixed-int builds. */
   hypre_BigHopscotchBucket *currBucket = &(m->table[hash & m->bucketMask]);
   HYPRE_Int i;
   for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket)
   {
      if (hash == currBucket->hash && key == currBucket->key)
      {
         return currBucket->data;
      }
   }
   return -1;
}
//status Operations .........................................................
/* Count the occupied buckets of the set by a full table scan (hash ==
   HYPRE_HOPSCOTCH_HASH_EMPTY marks a free bucket).  O(capacity). */
static inline
HYPRE_Int hypre_UnorderedIntSetSize( hypre_UnorderedIntSet *s )
{
   HYPRE_Int num_buckets = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int occupied = 0;
   HYPRE_Int bucket;

   for (bucket = 0; bucket < num_buckets; ++bucket)
   {
      if (s->hash[bucket] != HYPRE_HOPSCOTCH_HASH_EMPTY)
      {
         ++occupied;
      }
   }
   return occupied;
}
/* Count the occupied buckets of the big-int set by a full table scan.
   NOTE(review): the loop index is HYPRE_Int while the bucket count is
   HYPRE_BigInt -- assumes the table size fits in HYPRE_Int; confirm for
   mixed-int builds. */
static inline
HYPRE_Int hypre_UnorderedBigIntSetSize( hypre_UnorderedBigIntSet *s )
{
   HYPRE_BigInt num_buckets = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int occupied = 0;
   HYPRE_Int bucket;

   for (bucket = 0; bucket < num_buckets; ++bucket)
   {
      if (s->hash[bucket] != HYPRE_HOPSCOTCH_HASH_EMPTY)
      {
         ++occupied;
      }
   }
   return occupied;
}
/* Return the number of key/value pairs currently stored in the map `m`.
 * Counts the occupied buckets (hash != EMPTY) over the whole table,
 * including the insertion overflow range. */
static inline HYPRE_Int
hypre_UnorderedIntMapSize( hypre_UnorderedIntMap *m )
{
HYPRE_Int occupied = 0;
HYPRE_Int numBuckets = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
HYPRE_Int b;
for (b = 0; b < numBuckets; ++b)
{
if (m->table[b].hash != HYPRE_HOPSCOTCH_HASH_EMPTY)
{
occupied++;
}
}
return occupied;
}
/* Return the number of key/value pairs currently stored in the big-int map
 * `m`, by counting the non-EMPTY buckets of the backing table (including
 * the insertion overflow range). */
static inline HYPRE_Int
hypre_UnorderedBigIntMapSize( hypre_UnorderedBigIntMap *m )
{
HYPRE_Int occupied = 0;
HYPRE_Int numBuckets = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
HYPRE_Int b;
for (b = 0; b < numBuckets; ++b)
{
if (m->table[b].hash != HYPRE_HOPSCOTCH_HASH_EMPTY)
{
occupied++;
}
}
return occupied;
}
HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len );
HYPRE_BigInt *hypre_UnorderedBigIntSetCopyToArray( hypre_UnorderedBigIntSet *s, HYPRE_Int *len );
//modification Operations ...................................................
/* Insert `key` into the set `s`; no-op if the key is already present.
 * Hopscotch insertion: claim the first free slot within INSERT_RANGE of the
 * home bucket via CAS, then repeatedly move the free slot closer until it
 * lies within HOP_RANGE of the home bucket. Aborts the program if no free
 * slot can be brought into range (resize is not implemented). */
static inline void
hypre_UnorderedIntSetPut( hypre_UnorderedIntSet *s,
HYPRE_Int key )
{
//CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
HYPRE_Int hash = hypre_BigHash(key);
#else
HYPRE_Int hash = hypre_Hash(key);
#endif
//LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// per-segment lock serializes inserts that share a segment
hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
omp_set_lock(&segment->lock);
#endif
HYPRE_Int bucket = hash & s->bucketMask;
//CHECK IF ALREADY CONTAIN ................
// probe the buckets named by the home bucket's hop bitmap
hypre_uint hopInfo = s->hopInfo[bucket];
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
HYPRE_Int currElm = bucket + i;
if (hash == s->hash[currElm] && key == s->key[currElm])
{
// key already present: release the lock and return without change
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return;
}
hopInfo &= ~(1U << i);
}
//LOOK FOR FREE BUCKET ....................
// linear probe for a free slot; CAS EMPTY -> BUSY claims it atomically
HYPRE_Int free_bucket = bucket;
HYPRE_Int free_dist = 0;
for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
{
if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) &&
(HYPRE_HOPSCOTCH_HASH_EMPTY ==
hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket],
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
{
break;
}
}
//PLACE THE NEW KEY .......................
if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
{
do
{
if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
{
// free slot within hop range: publish key, hash, and hop bit, then unlock
s->key[free_bucket] = key;
s->hash[free_bucket] = hash;
s->hopInfo[bucket] |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return;
}
// free slot too far away: displace an earlier key into it to move the
// free slot closer to the home bucket (sets free_bucket to -1 on failure)
hypre_UnorderedIntSetFindCloserFreeBucket(s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
segment,
#endif
&free_bucket, &free_dist);
}
while (-1 != free_bucket);
}
//NEED TO RESIZE ..........................
// table is effectively full and resize is unimplemented: hard abort
hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n");
/*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
exit(1);
return;
}
/* Insert `key` into the big-int set `s`; no-op if already present.
 * Same hopscotch insertion scheme as hypre_UnorderedIntSetPut: CAS-claim a
 * free slot within INSERT_RANGE, then displace keys to bring it within
 * HOP_RANGE of the home bucket. Aborts if that fails (no resize). */
static inline void
hypre_UnorderedBigIntSetPut( hypre_UnorderedBigIntSet *s,
HYPRE_BigInt key )
{
//CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
HYPRE_BigInt hash = hypre_BigHash(key);
#else
HYPRE_BigInt hash = hypre_Hash(key);
#endif
//LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// per-segment lock serializes inserts that share a segment
hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
omp_set_lock(&segment->lock);
#endif
HYPRE_Int bucket = (HYPRE_Int)(hash & s->bucketMask);
//CHECK IF ALREADY CONTAIN ................
// probe the buckets named by the home bucket's hop bitmap
hypre_uint hopInfo = s->hopInfo[bucket];
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
HYPRE_Int currElm = bucket + i;
if (hash == s->hash[currElm] && key == s->key[currElm])
{
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return;
}
hopInfo &= ~(1U << i);
}
//LOOK FOR FREE BUCKET ....................
// linear probe for a free slot; CAS EMPTY -> BUSY claims it atomically
// NOTE(review): s->hash holds HYPRE_BigInt values here but the CAS goes
// through a (HYPRE_Int *) — confirm the widths agree in HYPRE_MIXEDINT builds.
HYPRE_Int free_bucket = bucket;
HYPRE_Int free_dist = 0;
for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
{
if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) &&
(HYPRE_HOPSCOTCH_HASH_EMPTY ==
hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket],
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
{
break;
}
}
//PLACE THE NEW KEY .......................
if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
{
do
{
if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
{
// free slot within hop range: publish key, hash, and hop bit, then unlock
s->key[free_bucket] = key;
s->hash[free_bucket] = hash;
s->hopInfo[bucket] |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return;
}
// move the free slot closer to the home bucket by displacing a key
hypre_UnorderedBigIntSetFindCloserFreeBucket(s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
segment,
#endif
&free_bucket, &free_dist);
}
while (-1 != free_bucket);
}
//NEED TO RESIZE ..........................
// table is effectively full and resize is unimplemented: hard abort
hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n");
/*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
exit(1);
return;
}
/* Insert (key, data) into the map `m` if `key` is absent.
 * Returns the value already stored for `key` if present, otherwise
 * HYPRE_HOPSCOTCH_HASH_EMPTY after inserting the new pair.
 * Uses the same hopscotch scheme as the set inserts; aborts the program
 * when no free bucket can be brought into hop range (no resize). */
static inline HYPRE_Int
hypre_UnorderedIntMapPutIfAbsent( hypre_UnorderedIntMap *m,
HYPRE_Int key, HYPRE_Int data )
{
//CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
HYPRE_Int hash = hypre_BigHash(key);
#else
HYPRE_Int hash = hypre_Hash(key);
#endif
//LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// per-segment lock serializes inserts that share a segment
hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
omp_set_lock(&segment->lock);
#endif
hypre_HopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]);
//CHECK IF ALREADY CONTAIN ................
// probe the buckets named by the home bucket's hop bitmap
hypre_uint hopInfo = startBucket->hopInfo;
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
hypre_HopscotchBucket* currElm = startBucket + i;
if (hash == currElm->hash && key == currElm->key)
{
// key present: return the existing value unchanged
HYPRE_Int rc = currElm->data;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return rc;
}
hopInfo &= ~(1U << i);
}
//LOOK FOR FREE BUCKET ....................
// linear probe for a free bucket; CAS EMPTY -> BUSY claims it atomically
hypre_HopscotchBucket* free_bucket = startBucket;
HYPRE_Int free_dist = 0;
for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
{
if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) &&
(HYPRE_HOPSCOTCH_HASH_EMPTY ==
hypre_compare_and_swap((HYPRE_Int *)&free_bucket->hash,
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
{
break;
}
}
//PLACE THE NEW KEY .......................
if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
{
do
{
if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
{
// claimed bucket within hop range: publish data, key, hash, hop bit
free_bucket->data = data;
free_bucket->key = key;
free_bucket->hash = hash;
startBucket->hopInfo |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
// move the free bucket closer by displacing a key (NULLs free_bucket on failure)
hypre_UnorderedIntMapFindCloserFreeBucket(m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
segment,
#endif
&free_bucket, &free_dist);
}
while (NULL != free_bucket);
}
//NEED TO RESIZE ..........................
// table is effectively full and resize is unimplemented: hard abort
hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n");
/*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
exit(1);
return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
/* Insert (key, data) into the big-int map `m` if `key` is absent.
 * Returns the value already stored for `key` if present, otherwise
 * HYPRE_HOPSCOTCH_HASH_EMPTY after inserting the new pair.
 * Same hopscotch insertion scheme as hypre_UnorderedIntMapPutIfAbsent. */
static inline HYPRE_Int
hypre_UnorderedBigIntMapPutIfAbsent( hypre_UnorderedBigIntMap *m,
HYPRE_BigInt key, HYPRE_Int data)
{
//CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
HYPRE_BigInt hash = hypre_BigHash(key);
#else
HYPRE_BigInt hash = hypre_Hash(key);
#endif
//LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// per-segment lock serializes inserts that share a segment
hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
omp_set_lock(&segment->lock);
#endif
hypre_BigHopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]);
//CHECK IF ALREADY CONTAIN ................
// probe the buckets named by the home bucket's hop bitmap
hypre_uint hopInfo = startBucket->hopInfo;
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
hypre_BigHopscotchBucket* currElm = startBucket + i;
if (hash == currElm->hash && key == currElm->key)
{
// key present: return the existing value unchanged
HYPRE_Int rc = currElm->data;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return rc;
}
hopInfo &= ~(1U << i);
}
//LOOK FOR FREE BUCKET ....................
// linear probe for a free bucket; CAS EMPTY -> BUSY claims it atomically
// NOTE(review): bucket hash is HYPRE_BigInt but the CAS goes through a
// (HYPRE_Int *) — confirm the widths agree in HYPRE_MIXEDINT builds.
hypre_BigHopscotchBucket* free_bucket = startBucket;
HYPRE_Int free_dist = 0;
for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
{
if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) &&
(HYPRE_HOPSCOTCH_HASH_EMPTY ==
hypre_compare_and_swap((HYPRE_Int *)&free_bucket->hash,
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
{
break;
}
}
//PLACE THE NEW KEY .......................
if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
{
do
{
if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
{
// claimed bucket within hop range: publish data, key, hash, hop bit
free_bucket->data = data;
free_bucket->key = key;
free_bucket->hash = hash;
startBucket->hopInfo |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
// move the free bucket closer by displacing a key (NULLs free_bucket on failure)
hypre_UnorderedBigIntMapFindCloserFreeBucket(m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
segment,
#endif
&free_bucket, &free_dist);
}
while (NULL != free_bucket);
}
//NEED TO RESIZE ..........................
// table is effectively full and resize is unimplemented: hard abort
hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n");
/*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
exit(1);
return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
#ifdef __cplusplus
} // extern "C"
#endif
#endif // hypre_HOPSCOTCH_HASH_HEADER
|
parallel-simple.c | /*
* parallel-simple.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run | FileCheck %s
// REQUIRES: tsan
#include <omp.h>
#include <stdio.h>
/* Archer/TSan testcase: a race-free OpenMP program.
 * Inside the parallel region only thread 1 writes `var`; the increment
 * after the region is ordered by the implicit barrier, so ThreadSanitizer
 * must report no race. Exact statement placement is what is under test. */
int main(int argc, char *argv[]) {
int var = 0;
#pragma omp parallel num_threads(2) shared(var)
{
// only one of the two threads touches var => no concurrent writes
if (omp_get_thread_num() == 1) {
var++;
}
} // implicit barrier
// sequential increment after the barrier: happens-after the parallel write
var++;
fprintf(stderr, "DONE\n");
// var must be exactly 2 (one write in the region, one after)
int error = (var != 2);
return error;
}
// CHECK-NOT: ThreadSanitizer: data race
// CHECK-NOT: ThreadSanitizer: reported
// CHECK-NOT: Warning: please export TSAN_OPTIONS
// CHECK: DONE
|
kmp_set_dispatch_buf.c | // RUN: %libomp-compile
// RUN: env KMP_DISP_NUM_BUFFERS=0 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=1 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=3 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=4 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=7 %libomp-run
// RUN: %libomp-compile -DMY_SCHEDULE=guided
// RUN: env KMP_DISP_NUM_BUFFERS=1 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=3 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=4 %libomp-run
// RUN: env KMP_DISP_NUM_BUFFERS=7 %libomp-run
// UNSUPPORTED: clang-11, clang-12
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <limits.h>
#include "omp_testsuite.h"
#define INCR 7
#define MY_MAX 200
#define MY_MIN -200
#define NUM_LOOPS 100
#ifndef MY_SCHEDULE
# define MY_SCHEDULE dynamic
#endif
int a, b, a_known_value, b_known_value;
/* Stress the runtime's loop-dispatch buffers: every thread runs many small
 * nowait-scheduled loops back to back, so consecutive loops reuse/rotate the
 * dispatch buffers (their count is set via KMP_DISP_NUM_BUFFERS in the RUN
 * lines). Returns 1 on success, 0 if the atomic counters a/b end up wrong. */
int test_kmp_set_disp_num_buffers()
{
int success = 1;
a = 0;
b = 0;
// run many small dynamic loops to stress the dispatch buffer system
#pragma omp parallel
{
int i,j;
for (j = 0; j < NUM_LOOPS; j++) {
// nowait: threads move on to the next loop without a barrier,
// forcing several loops to be in flight in the dispatch buffers
#pragma omp for schedule(MY_SCHEDULE) nowait
for (i = MY_MIN; i < MY_MAX; i+=INCR) {
#pragma omp atomic
a++;
}
#pragma omp for schedule(MY_SCHEDULE) nowait
for (i = MY_MAX; i >= MY_MIN; i-=INCR) {
#pragma omp atomic
b++;
}
}
}
// detect failure
if (a != a_known_value || b != b_known_value) {
success = 0;
printf("a = %d (should be %d), b = %d (should be %d)\n", a, a_known_value,
b, b_known_value);
}
return success;
}
/* Driver: precompute the expected per-loop trip counts once, scale by
 * NUM_LOOPS to get the reference totals, then run the stress test
 * REPETITIONS times and return the number of failing runs (0 = pass). */
int main(int argc, char** argv)
{
int rep;
int v;
int per_loop_a = 0;
int per_loop_b = 0;
int num_failed = 0;
// trip count of one ascending loop [MY_MIN, MY_MAX) stepping by INCR
for (v = MY_MIN; v < MY_MAX; v += INCR) {
per_loop_a++;
}
// trip count of one descending loop [MY_MAX, MY_MIN] stepping by -INCR
for (v = MY_MAX; v >= MY_MIN; v -= INCR) {
per_loop_b++;
}
a_known_value = NUM_LOOPS * per_loop_a;
b_known_value = NUM_LOOPS * per_loop_b;
for (rep = 0; rep < REPETITIONS; rep++) {
if (!test_kmp_set_disp_num_buffers()) {
num_failed++;
}
}
return num_failed;
}
|
srad.c | //====================================================================================================100
// UPDATE
//====================================================================================================100
// 2006.03 Rob Janiczek
// --creation of prototype version
// 2006.03 Drew Gilliam
// --rewriting of prototype version into current version
// --got rid of multiple function calls, all code in a
// single function (for speed)
// --code cleanup & commenting
// --code optimization efforts
// 2006.04 Drew Gilliam
// --added diffusion coefficent saturation on [0,1]
// 2009.12 Lukasz G. Szafaryn
// -- reading from image, command line inputs
// 2010.01 Lukasz G. Szafaryn
// --comments
//====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include "define.c"
#include "graphics.c"
#include "resize.c"
#include "timer.c"
//====================================================================================================100
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
//====================================================================================================100
/* SRAD (Speckle Reducing Anisotropic Diffusion) benchmark driver.
 * Reads a PGM image, resizes it to Nr x Nc, runs `niter` diffusion
 * iterations with step `lambda` on `threads` OpenMP threads, writes the
 * result, and prints a per-stage timing breakdown.
 * Fixes in this revision:
 *  - printf timing formats used a lone '%' before ':' (an invalid
 *    conversion specification => undefined behavior); now escaped as '%%'.
 *  - iN/iS/jW/jE were allocated with sizeof(int *) although they hold int;
 *    now sizeof(int) (correct size, no over-allocation).
 */
int main(int argc, char *argv[]) {

  // timing checkpoints delimiting the stages reported at the end
  long long time0;
  long long time1;
  long long time2;
  long long time3;
  long long time4;
  long long time5;
  long long time6;
  long long time7;
  long long time8;
  long long time9;
  long long time10;

  time0 = get_time();

  // original input image (fixed 502x458 PGM)
  fp *image_ori;
  int image_ori_rows;
  int image_ori_cols;
  long image_ori_elem;

  // working image after resize
  fp *image;   // input image
  long Nr, Nc; // IMAGE nbr of rows/cols
  long Ne;     // total number of elements (Nr*Nc)

  // algorithm parameters
  int niter;  // nbr of iterations
  fp lambda;  // update step size

  // region of interest (whole image here)
  int r1, r2, c1, c2; // row/col coordinates of uniform ROI
  long NeROI;         // ROI nbr of elements

  // ROI statistics
  fp meanROI, varROI, q0sqr; // local region statistics

  // clamped neighbor indices (boundary conditions baked in)
  int *iN, *iS, *jE, *jW;

  // center pixel value
  fp Jc;

  // directional derivatives
  fp *dN, *dS, *dW, *dE;

  // calculation variables
  fp tmp, sum, sum2;
  fp G2, L, num, den, qsqr, D;

  // diffusion coefficient
  fp *c;
  fp cN, cS, cW, cE;

  // counters
  int iter;   // primary loop
  long i, j;  // image row/col
  long k;     // image single index

  // number of threads
  int threads;

  time1 = get_time();

  //================================================================================80
  // GET INPUT PARAMETERS
  //================================================================================80

  if (argc != 6) {
    printf("ERROR: wrong number of arguments\n");
    return 0; // NOTE(review): returns success on usage error; kept for script compatibility
  } else {
    niter = atoi(argv[1]);
    lambda = atof(argv[2]);
    Nr = atoi(argv[3]); // it is 502 in the original image
    Nc = atoi(argv[4]); // it is 458 in the original image
    threads = atoi(argv[5]);
  }

  omp_set_num_threads(threads);

  time2 = get_time();

  //================================================================================80
  // READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN)
  //================================================================================80

  image_ori_rows = 502;
  image_ori_cols = 458;
  image_ori_elem = image_ori_rows * image_ori_cols;

  image_ori = (fp *)malloc(sizeof(fp) * image_ori_elem);

  read_graphics("../../../data/srad/image.pgm", image_ori, image_ori_rows,
                image_ori_cols, 1);

  time3 = get_time();

  //================================================================================80
  // RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_orig)
  //================================================================================80

  Ne = Nr * Nc;

  image = (fp *)malloc(sizeof(fp) * Ne);

  resize(image_ori, image_ori_rows, image_ori_cols, image, Nr, Nc, 1);

  time4 = get_time();

  //================================================================================80
  // SETUP
  //================================================================================80

  r1 = 0;      // top row index of ROI
  r2 = Nr - 1; // bottom row index of ROI
  c1 = 0;      // left column index of ROI
  c2 = Nc - 1; // right column index of ROI

  // ROI image size
  NeROI = (r2 - r1 + 1) * (c2 - c1 + 1); // number of elements in ROI

  // allocate variables for surrounding pixels
  // (fix: these arrays hold int, so allocate sizeof(int), not sizeof(int *))
  iN = malloc(sizeof(int) * Nr); // north surrounding element
  iS = malloc(sizeof(int) * Nr); // south surrounding element
  jW = malloc(sizeof(int) * Nc); // west surrounding element
  jE = malloc(sizeof(int) * Nc); // east surrounding element

  // allocate variables for directional derivatives
  dN = malloc(sizeof(fp) * Ne); // north direction derivative
  dS = malloc(sizeof(fp) * Ne); // south direction derivative
  dW = malloc(sizeof(fp) * Ne); // west direction derivative
  dE = malloc(sizeof(fp) * Ne); // east direction derivative

  // allocate variable for diffusion coefficient
  c = malloc(sizeof(fp) * Ne);

  // N/S/W/E indices of surrounding pixels (every element of IMAGE)
  for (i = 0; i < Nr; i++) {
    iN[i] = i - 1; // holds index of IMAGE row above
    iS[i] = i + 1; // holds index of IMAGE row below
  }
  for (j = 0; j < Nc; j++) {
    jW[j] = j - 1; // holds index of IMAGE column on the left
    jE[j] = j + 1; // holds index of IMAGE column on the right
  }

  // boundary conditions: clamp out-of-range neighbor indices to the edge
  iN[0] = 0;
  iS[Nr - 1] = Nr - 1;
  jW[0] = 0;
  jE[Nc - 1] = Nc - 1;

  time5 = get_time();

  //================================================================================80
  // SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT
  //================================================================================80

  for (i = 0; i < Ne; i++) {
    image[i] = exp(image[i] / 255); // exponentiate input IMAGE and copy to output image
  }

  time6 = get_time();

  //================================================================================80
  // COMPUTATION
  //================================================================================80

  for (iter = 0; iter < niter; iter++) {

    // ROI statistics for entire ROI (single number for ROI)
    sum = 0;
    sum2 = 0;
    for (i = r1; i <= r2; i++) {
      for (j = c1; j <= c2; j++) {
        tmp = image[i + Nr * j]; // column-major indexing
        sum += tmp;
        sum2 += tmp * tmp;
      }
    }
    meanROI = sum / NeROI;                       // mean value over ROI
    varROI = (sum2 / NeROI) - meanROI * meanROI; // variance of ROI
    q0sqr = varROI / (meanROI * meanROI);        // normalized variance (speckle scale)

// directional derivatives, ICOV, diffusion coefficent
#pragma omp parallel for shared(image, dN, dS, dW, dE, c, Nr, Nc, iN, iS, jW, \
                                jE) private(i, j, k, Jc, G2, L, num, den,     \
                                            qsqr)
    for (j = 0; j < Nc; j++) {
      for (i = 0; i < Nr; i++) {
        k = i + Nr * j;  // position of current element (column-major)
        Jc = image[k];   // value of the current element

        // directional derivates (every element of IMAGE)
        dN[k] = image[iN[i] + Nr * j] - Jc;
        dS[k] = image[iS[i] + Nr * j] - Jc;
        dW[k] = image[i + Nr * jW[j]] - Jc;
        dE[k] = image[i + Nr * jE[j]] - Jc;

        // normalized discrete gradient mag squared (equ 52,53)
        G2 = (dN[k] * dN[k] + dS[k] * dS[k] + dW[k] * dW[k] +
              dE[k] * dE[k]) /
             (Jc * Jc);

        // normalized discrete laplacian (equ 54)
        L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc;

        // ICOV (equ 31/35)
        num = (0.5 * G2) - ((1.0 / 16.0) * (L * L));
        den = 1 + (.25 * L);
        qsqr = num / (den * den);

        // diffusion coefficent (equ 33)
        den = (qsqr - q0sqr) / (q0sqr * (1 + q0sqr));
        c[k] = 1.0 / (1.0 + den);

        // saturate diffusion coefficent to [0,1]
        if (c[k] < 0) {
          c[k] = 0;
        } else if (c[k] > 1) {
          c[k] = 1;
        }
      }
    }

// divergence & image update
#pragma omp parallel for shared(image, c, Nr, Nc,                              \
                                lambda) private(i, j, k, D, cS, cN, cW, cE)
    for (j = 0; j < Nc; j++) {
      for (i = 0; i < Nr; i++) {
        k = i + Nr * j;

        // diffusion coefficients of the four neighbors (N/W reuse c[k])
        cN = c[k];
        cS = c[iS[i] + Nr * j];
        cW = c[k];
        cE = c[i + Nr * jE[j]];

        // divergence (equ 58)
        D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k];

        // image update (equ 61)
        image[k] = image[k] + 0.25 * lambda * D;
      }
    }
  }

  time7 = get_time();

  //================================================================================80
  // SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS
  //================================================================================80

  for (i = 0; i < Ne; i++) {
    image[i] = log(image[i]) * 255; // take logarithm of image, log compress
  }

  time8 = get_time();

  //================================================================================80
  // WRITE IMAGE AFTER PROCESSING
  //================================================================================80

  write_graphics("image_out.pgm", image, Nr, Nc, 1, 255);

  time9 = get_time();

  //================================================================================80
  // DEALLOCATE
  //================================================================================80

  free(image_ori);
  free(image);
  free(iN);
  free(iS);
  free(jW);
  free(jE); // deallocate surrounding pixel memory
  free(dN);
  free(dS);
  free(dW);
  free(dE); // deallocate directional derivative memory
  free(c);  // deallocate diffusion coefficient memory

  time10 = get_time();

  //================================================================================80
  // DISPLAY TIMING
  //================================================================================80
  // (fix: '%' must be written as '%%' in printf format strings)

  printf("Time spent in different stages of the application:\n");
  printf("%.12f s, %.12f %% : SETUP VARIABLES\n",
         (float)(time1 - time0) / 1000000,
         (float)(time1 - time0) / (float)(time10 - time0) * 100);
  printf("%.12f s, %.12f %% : READ COMMAND LINE PARAMETERS\n",
         (float)(time2 - time1) / 1000000,
         (float)(time2 - time1) / (float)(time10 - time0) * 100);
  printf("%.12f s, %.12f %% : READ IMAGE FROM FILE\n",
         (float)(time3 - time2) / 1000000,
         (float)(time3 - time2) / (float)(time10 - time0) * 100);
  printf("%.12f s, %.12f %% : RESIZE IMAGE\n",
         (float)(time4 - time3) / 1000000,
         (float)(time4 - time3) / (float)(time10 - time0) * 100);
  printf("%.12f s, %.12f %% : SETUP, MEMORY ALLOCATION\n",
         (float)(time5 - time4) / 1000000,
         (float)(time5 - time4) / (float)(time10 - time0) * 100);
  printf("%.12f s, %.12f %% : EXTRACT IMAGE\n",
         (float)(time6 - time5) / 1000000,
         (float)(time6 - time5) / (float)(time10 - time0) * 100);
  printf("%.12f s, %.12f %% : COMPUTE\n", (float)(time7 - time6) / 1000000,
         (float)(time7 - time6) / (float)(time10 - time0) * 100);
  printf("%.12f s, %.12f %% : COMPRESS IMAGE\n",
         (float)(time8 - time7) / 1000000,
         (float)(time8 - time7) / (float)(time10 - time0) * 100);
  printf("%.12f s, %.12f %% : SAVE IMAGE INTO FILE\n",
         (float)(time9 - time8) / 1000000,
         (float)(time9 - time8) / (float)(time10 - time0) * 100);
  printf("%.12f s, %.12f %% : FREE MEMORY\n",
         (float)(time10 - time9) / 1000000,
         (float)(time10 - time9) / (float)(time10 - time0) * 100);
  printf("Total time:\n");
  printf("%.12f s\n", (float)(time10 - time0) / 1000000);
}
|
IntegralOrbitals.c | /* CoulombOrbitals.c */
/**********************************************************************************************************
Copyright (c) 2002-2013 Abdul-Rahman Allouche. All rights reserved
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the Gabedit), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
************************************************************************************************************/
#include "../../Config.h"
#include "../Display/GlobalOrb.h"
#ifdef ENABLE_OMP
#include <omp.h>
#endif
#include "../Utils/Vector3d.h"
#include "../Utils/GTF.h"
#include "../Display/GLArea.h"
#include "../Display/Orbitals.h"
#include "../Display/OrbitalsMolpro.h"
#include "../Display/OrbitalsGamess.h"
#include "../Display/OrbitalsQChem.h"
#include "../Display/GeomOrbXYZ.h"
#include "../Display/BondsOrb.h"
#include "../Display/UtilsOrb.h"
#include "../Display/TriangleDraw.h"
#include "../Utils/Utils.h"
#include "../Utils/UtilsInterface.h"
#include "../Utils/Constants.h"
#include "../Utils/GabeditTextEdit.h"
#include "../Files/FileChooser.h"
#include "../Common/Windows.h"
#include "../Display/Vibration.h"
#include "../Display/ContoursPov.h"
#include "../Display/PlanesMappedPov.h"
#include "../Display/LabelsGL.h"
#include "../Display/StatusOrb.h"
#define WIDTHSCR 0.3
typedef gboolean (*FuncCompCoulomb)(gint N[],GridLimits limits, gint typeOrbi, gint i, gint typeOrbj, gint j,
gdouble* pInteg, gdouble* pNorm, gdouble* pNormj, gdouble* pOverlap);
/********************************************************************************/
/* <ii|delta(r_i,r_j)|jj>*/
/* Compute the spatial overlap <ii|delta(ri,rj)|jj> between orbitals i and j
 * (same spin required) analytically over the AO basis, with Schwarz
 * screening (|<pq|rs>| <= sqrt(<pq|pq>) sqrt(<rs|rs>)) to skip negligible
 * four-center overlap integrals. Returns the integral value; sets
 * CancelCalcul and returns 0 if the spins differ.
 * Fix in this revision: corrected typos in the user-visible progress
 * messages ("integrale" -> "integral", "pleasse" -> "please"). */
gdouble compute_spatial_overlap_analytic(gint typeOrbi, gint i, gint typeOrbj, gint j, gdouble schwarzCutOff)
{
	gdouble** CoefI = CoefAlphaOrbitals;
	gdouble** CoefJ = CoefAlphaOrbitals;
	gint k,kp;
	gint l,lp;
	gdouble scal;
	gchar tmp[BSIZE];
	gint* p;
	gint* q;
	gdouble* cci;
	gdouble* ccj;
	gint kk;
	gint ll;
	gulong delta = 0;
	gint pos = 0;
	gdouble cc = 0;
	gulong nAll = 0;
	gdouble integ;
	gint N;
	gdouble pqrs;
	gdouble* mnmn;       /* sqrt(|<pq|pq>|) per pair, for Schwarz screening */
	gulong nComp = 0;
	integ = 0;
	/* orbitals of different spin: nothing to compute */
	if(typeOrbi != typeOrbj )
	{
		/* stop calculation */
		CancelCalcul = TRUE;
		return integ ;
	}
	if(typeOrbi == 2) CoefI = CoefBetaOrbitals;
	if(typeOrbj == 2) CoefJ = CoefBetaOrbitals;
	/* number of unique AO pairs (p<=q) */
	N = NAOrb*(NAOrb+1)/2;
	if(N<1)return -1.0;
	mnmn = g_malloc(N*sizeof(gdouble));
	p = g_malloc(N*sizeof(gint));
	q = g_malloc(N*sizeof(gint));
	cci = g_malloc(N*sizeof(gdouble));
	ccj = g_malloc(N*sizeof(gdouble));
	sprintf(tmp,_("Computing of <%d %d|delta(ri,rj)| %d %d>.... Please wait"),i+1,i+1,j+1,j+1);
	setTextInProgress(tmp);
	/* flatten the (k,kp) pair list and precompute the MO coefficient products;
	 * off-diagonal pairs carry a factor 2 (pair counted once for k<kp) */
	kk = 0;
	for(k=0;k<NAOrb;k++)
	for(kp=k;kp<NAOrb;kp++)
	{
		p[kk] = k;
		q[kk] = kp;
		cci[kk] = 2*CoefI[i][k]*CoefI[i][kp]/((k==kp)?2:1);
		ccj[kk] = 2*CoefJ[j][k]*CoefJ[j][kp]/((k==kp)?2:1);
		mnmn[kk] = 0.0;
		kk++;
	}
	/* progress reporting granularity: about 1% of the pair-pair loop */
	scal = 0.01;
	delta = (gint)(N*(N+1.0)/2.0*scal);
	if(delta<1) delta = N*(N+1)/20;
	if(delta<1) delta = 1;
	pos = delta;
	/* NOTE(review): passes the literal string "tmp", not the message in tmp;
	 * looks like it only resets the progress bar — confirm intent */
	progress_orb_txt(0,"tmp",TRUE);
	/* First pass: diagonal <pq|pq> terms; also fills mnmn[] for Schwarz screening */
#ifdef ENABLE_OMP
#ifdef G_OS_WIN32
	setTextInProgress(_("Computing of spatial integral, please wait..."));
#endif
#pragma omp parallel for private(k,kp,kk,pqrs) reduction(+:integ,nAll,nComp,pos)
#endif
	for(kk=0;kk<N;kk++)
	{
		k = p[kk];
		kp = q[kk];
		pqrs = overlap4CGTF(&AOrb[k],&AOrb[kp],&AOrb[k],&AOrb[kp]);
		integ += (cci[kk]*ccj[kk])*pqrs;
		mnmn[kk] = sqrt(fabs(pqrs));
		nAll++;
		nComp++;
		if(nAll>=pos)
		{
			pos += delta;
#ifdef ENABLE_OMP
#ifndef G_OS_WIN32
#pragma omp critical
			progress_orb_txt(scal,tmp,FALSE);
#endif
#else
			progress_orb_txt(scal,tmp,FALSE);
#endif
		}
	}
	/* Second pass: off-diagonal <pq|rs> terms (ll < kk), Schwarz-screened */
#ifdef ENABLE_OMP
#ifdef G_OS_WIN32
	setTextInProgress(_("Computing of spatial integral, please wait..."));
#endif
#pragma omp parallel for private(k,kp,l,lp,kk,ll,pqrs,cc) reduction(+:integ,nAll,nComp,pos)
#endif
	for(kk=0;kk<N;kk++)
	{
		k = p[kk];
		kp = q[kk];
		if(!CancelCalcul)
		for(ll=0;ll<kk;ll++)
		{
			if(!CancelCalcul)
			{
				l = p[ll];
				lp = q[ll];
				nAll++;
				if(nAll>=pos)
				{
					pos += delta;
#ifdef ENABLE_OMP
#ifndef G_OS_WIN32
#pragma omp critical
					progress_orb_txt(scal,tmp,FALSE);
#endif
#else
					progress_orb_txt(scal,tmp,FALSE);
#endif
				}
				cc = (cci[kk]*ccj[ll]+cci[ll]*ccj[kk]);
				/* Schwarz screening: skip integrals bounded below the cutoff */
				if(fabs(cc*mnmn[kk]*mnmn[ll])>=schwarzCutOff)
				{
					pqrs = overlap4CGTF(&AOrb[k],&AOrb[kp],&AOrb[l],&AOrb[lp]);
					integ += cc*pqrs;
					nComp++;
				}
			}
		}
	}
	sprintf(tmp,"# of all <pq|rs> = %ld, # of computed <pq|rs> %ld\n",nAll, nComp);
	progress_orb_txt(0,tmp,TRUE);
	g_free(mnmn);
	g_free(p);
	g_free(q);
	g_free(cci);
	g_free(ccj);
	return integ;
}
/********************************************************************************/
/* Compute the transition dipole components integ[0..2] = <i|x|j>, <i|y|j>,
 * <i|z|j> between orbitals i and j (same spin) analytically: for each
 * Cartesian component, a diagonal AO sum plus a symmetrized off-diagonal sum.
 * Fix in this revision: the original reset `s = 0;` BEFORE `integ[0] += s;`
 * after the first loop, which discarded the whole diagonal x contribution;
 * the accumulation now happens before the reset, matching the y and z cases. */
void compute_transition_matrix_analytic(gint typeOrbi, gint i, gint typeOrbj, gint j, gdouble integ[])
{
	gint k;
	gint l;
	gdouble** CoefI = CoefAlphaOrbitals;
	gdouble** CoefJ = CoefAlphaOrbitals;
	gdouble s = 0;
	integ[0] = 0;
	integ[1] = 0;
	integ[2] = 0;
	/* orbitals of different spin: transition moment is zero */
	if(typeOrbi != typeOrbj ) return;
	if(typeOrbi == 2) CoefI = CoefBetaOrbitals;
	if(typeOrbj == 2) CoefJ = CoefBetaOrbitals;
	/* diagonal AO contributions, x component */
	s = 0;
#ifdef ENABLE_OMP
	printf("# proc = %d\n", omp_get_num_procs ());
#pragma omp parallel for private(k) reduction(+:s)
#endif
	for(k=0;k<NAOrb;k++)
		s += CoefI[i][k]*CoefJ[j][k]*CGTFxyzCGTF(&AOrb[k],&AOrb[k],1,0,0);
	/* BUG FIX: accumulate the diagonal x sum before resetting s
	 * (original zeroed s first, losing this contribution) */
	integ[0] += s;
	s = 0;
	/* diagonal AO contributions, y component */
#ifdef ENABLE_OMP
#pragma omp parallel for private(k) reduction(+:s)
#endif
	for(k=0;k<NAOrb;k++)
		s += CoefI[i][k]*CoefJ[j][k]*CGTFxyzCGTF(&AOrb[k],&AOrb[k],0,1,0);
	integ[1] += s;
	s = 0;
	/* diagonal AO contributions, z component */
#ifdef ENABLE_OMP
#pragma omp parallel for private(k) reduction(+:s)
#endif
	for(k=0;k<NAOrb;k++)
		s += CoefI[i][k]*CoefJ[j][k]*CGTFxyzCGTF(&AOrb[k],&AOrb[k],0,0,1);
	integ[2] += s;
	s = 0;
	/* off-diagonal (k<l) AO contributions, symmetrized, x component */
#ifdef ENABLE_OMP
#pragma omp parallel for private(k,l) reduction(+:s)
#endif
	for(k=0;k<NAOrb;k++)
	for(l=k+1;l<NAOrb;l++)
		s += (CoefI[i][k]*CoefJ[j][l]+CoefI[i][l]*CoefJ[j][k])*CGTFxyzCGTF(&AOrb[k],&AOrb[l],1,0,0);
	integ[0] += s;
	s = 0;
	/* off-diagonal AO contributions, y component */
#ifdef ENABLE_OMP
#pragma omp parallel for private(k,l) reduction(+:s)
#endif
	for(k=0;k<NAOrb;k++)
	for(l=k+1;l<NAOrb;l++)
		s += (CoefI[i][k]*CoefJ[j][l]+CoefI[i][l]*CoefJ[j][k])*CGTFxyzCGTF(&AOrb[k],&AOrb[l],0,1,0);
	integ[1] += s;
	s = 0;
	/* off-diagonal AO contributions, z component */
#ifdef ENABLE_OMP
#pragma omp parallel for private(k,l) reduction(+:s)
#endif
	for(k=0;k<NAOrb;k++)
	for(l=k+1;l<NAOrb;l++)
		s += (CoefI[i][k]*CoefJ[j][l]+CoefI[i][l]*CoefJ[j][k])*CGTFxyzCGTF(&AOrb[k],&AOrb[l],0,0,1);
	integ[2] += s;
}
/********************************************************************************/
/*
** Analytic overlap <i|j> between two molecular orbitals expanded on the
** contracted-Gaussian AO basis AOrb.  Returns 0.0 when the orbitals belong
** to different spin spaces; typeOrb* == 2 selects the beta coefficients.
*/
gdouble get_overlap_analytic(gint typeOrbi, gint i, gint typeOrbj, gint j)
{
	gint mu;
	gint nu;
	gdouble sum = 0.0;
	gdouble** ci = CoefAlphaOrbitals;
	gdouble** cj = CoefAlphaOrbitals;
	if(typeOrbi != typeOrbj) return 0.0;
	if(typeOrbi == 2) ci = CoefBetaOrbitals;
	if(typeOrbj == 2) cj = CoefBetaOrbitals;
	/* diagonal AO overlaps */
#ifdef ENABLE_OMP
#pragma omp parallel for private(mu) reduction(+:sum)
#endif
	for(mu=0;mu<NAOrb;mu++)
		sum += ci[i][mu]*cj[j][mu]*overlapCGTF(&AOrb[mu],&AOrb[mu]);
	/* off-diagonal AO overlaps: each mu<nu pair visited once, both
	   coefficient orderings folded into the weight */
#ifdef ENABLE_OMP
#pragma omp parallel for private(mu,nu) reduction(+:sum)
#endif
	for(mu=0;mu<NAOrb;mu++)
	for(nu=mu+1;nu<NAOrb;nu++)
		sum += (ci[i][mu]*cj[j][nu]+ci[i][nu]*cj[j][mu])*overlapCGTF(&AOrb[mu],&AOrb[nu]);
	return sum;
}
/********************************************************************************/
/*
gdouble get_coulomb_analytic(gint typeOrbi, gint i, gint typeOrbj, gint j)
{
gint k,kp;
gint l,lp;
gdouble v=0.0;
gdouble** CoefI = CoefAlphaOrbitals;
gdouble** CoefJ = CoefAlphaOrbitals;
gdouble d,eri;
gdouble scal;
gint N = NAOrb*(NAOrb+1)/2;
gint* p = g_malloc(N*sizeof(gint));
gint* q = g_malloc(N*sizeof(gint));
gint* dpq = g_malloc(N*sizeof(gint));
gdouble* cci = g_malloc(N*sizeof(gdouble));
gdouble* ccj = g_malloc(N*sizeof(gdouble));
gint kk;
gint ll;
gint dkkll;
scal = (gdouble)1.01/N;
if(typeOrbi == 2) CoefI = CoefBetaOrbitals;
if(typeOrbj == 2) CoefJ = CoefBetaOrbitals;
kk = 0;
for(k=0;k<NAOrb;k++)
for(kp=k;kp<NAOrb;kp++)
{
p[kk] = k;
q[kk] = kp;
dpq[kk] =(k==kp)?2:1;
cci[kk] = CoefI[i][k]*CoefI[i][kp];
ccj[kk] = CoefJ[j][k]*CoefJ[j][kp];
kk++;
}
progress_orb(0,GABEDIT_PROGORB_COMPINTEG,TRUE);
for(kk=0;kk<N;kk++)
{
k = p[kk];
kp = q[kk];
progress_orb(scal,GABEDIT_PROGORB_COMPINTEG,FALSE);
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPINTEG,TRUE);
break;
}
if(fabs(cci[kk])<1e-12 && fabs(ccj[kk])<1e-12 )continue;
if(!CancelCalcul)
for(ll=kk;ll<N;ll++)
{
l = p[ll];
lp = q[ll];
if(CancelCalcul) break;
if(fabs(cci[ll])<1e-12 && fabs(ccj[ll])<1e-12 )continue;
dkkll=(kk==ll)?2:1;
d = dpq[kk]*dpq[ll]*dkkll;
eri = ERICGTF(&AOrb[k],&AOrb[kp],&AOrb[l],&AOrb[lp]);
v += 4*(cci[kk]*ccj[ll]+cci[ll]*ccj[kk])*eri/d;
}
}
progress_orb(0,GABEDIT_PROGORB_COMPINTEG,TRUE);
g_free(p);
g_free(q);
g_free(dpq);
g_free(cci);
g_free(ccj);
if(CancelCalcul) return -1.0;
return v;
}
*/
/********************************************************************************/
/*
gdouble get_coulomb_analytic(gint typeOrbi, gint i, gint typeOrbj, gint j)
{
gint k,kp;
gint l,lp;
gdouble v=0.0;
gdouble** CoefI = CoefAlphaOrbitals;
gdouble** CoefJ = CoefAlphaOrbitals;
gdouble a,b,eri;
gdouble scal;
scal = (gdouble)2.02/NAOrb/(NAOrb+1);
gdouble cci = 0;
gdouble ccj = 0;
if(typeOrbi == 2) CoefI = CoefBetaOrbitals;
if(typeOrbj == 2) CoefJ = CoefBetaOrbitals;
progress_orb(0,GABEDIT_PROGORB_COMPINTEG,TRUE);
for(k=0;k<NAOrb;k++)
for(kp=k;kp<NAOrb;kp++)
{
cci = CoefI[i][k]*CoefI[i][kp];
progress_orb(scal,GABEDIT_PROGORB_COMPINTEG,FALSE);
if(CancelCalcul)
{
progress_orb(0,GABEDIT_PROGORB_COMPINTEG,TRUE);
break;
}
if(fabs(cci)<1e-12)continue;
a=(k==kp)?1:2;
if(!CancelCalcul)
for(l=0;l<NAOrb;l++)
for(lp=l;lp<NAOrb;lp++)
{
if(CancelCalcul) break;
ccj = CoefJ[j][l]*CoefJ[j][lp];
if(fabs(ccj)<1e-12)continue;
b=(l==lp)?1:2;
eri = ERICGTF(&AOrb[k],&AOrb[kp],&AOrb[l],&AOrb[lp]);
v += cci*ccj*eri*a*b;
}
}
progress_orb(0,GABEDIT_PROGORB_COMPINTEG,TRUE);
if(CancelCalcul) return -1.0;
return v;
}
*/
/********************************************************************************/
/*
** Analytic Coulomb integral <ii|1/r12|jj> between molecular orbitals i and j
** expanded on the contracted-Gaussian AO basis AOrb, evaluated via
** precomputed T tables (ERICTABLES) with Schwarz screening: an off-diagonal
** pair (kk,ll) is skipped when |cc*sqrt((kk|kk))*sqrt((ll|ll))| < schwarzCutOff.
** typeOrb* == 2 selects the beta coefficient matrices.
** Returns -1.0 on failure or when the user cancelled the computation.
*/
gdouble get_coulomb_analytic(gint typeOrbi, gint i, gint typeOrbj, gint j, gdouble schwarzCutOff)
{
	gint k,kp;
	gint l,lp;
	gdouble v=0.0;
	gdouble** CoefI = CoefAlphaOrbitals;
	gdouble** CoefJ = CoefAlphaOrbitals;
	gdouble eri = 0;
	gdouble scal;
	gchar tmp[BSIZE];
	gint N = NAOrb*(NAOrb+1)/2; /* number of unique AO pairs */
	gint* p = NULL;
	gint* q = NULL;
	gdouble* cci = NULL;
	gdouble* ccj = NULL;
	gdouble* mnmn = NULL; /* Schwarz bounds sqrt(|(kk|kk)|) per pair */
	gint kk;
	gint ll;
	gulong delta = 0;
	gint pos = 0;
	TTABLES** Ttables = NULL;
	gdouble cc = 0;
	gdouble ccmn = 0;
	gulong nAll = 0;
	gulong nComp = 0;
	/* BUG FIX: test N before allocating; the original allocated the five
	   work arrays first and leaked all of them on this early return. */
	if(N<1)return -1.0;
	p = g_malloc(N*sizeof(gint));
	q = g_malloc(N*sizeof(gint));
	cci = g_malloc(N*sizeof(gdouble));
	ccj = g_malloc(N*sizeof(gdouble));
	mnmn = g_malloc(N*sizeof(gdouble));
	setTextInProgress(_("Creation of T1 and T2 tables... Please wait"));
	Ttables = createTTables(AOrb, NAOrb, 1e-9);
	if(!Ttables)
	{
		/* BUG FIX: free the work arrays (leaked in the original). */
		g_free(p);
		g_free(q);
		g_free(cci);
		g_free(ccj);
		g_free(mnmn);
		return -1.0;
	}
	sprintf(tmp,_("Computing of <%d %d|1/r12| %d %d>.... Please wait"),i+1,i+1,j+1,j+1);
	if(typeOrbi == 2) CoefI = CoefBetaOrbitals;
	if(typeOrbj == 2) CoefJ = CoefBetaOrbitals;
	kk = 0;
	for(k=0;k<NAOrb;k++)
	for(kp=k;kp<NAOrb;kp++)
	{
		p[kk] = k;
		q[kk] = kp;
		/* factor 2 accounts for (k,kp)+(kp,k); diagonal pairs counted once */
		cci[kk] = 2*CoefI[i][k]*CoefI[i][kp]/((k==kp)?2:1);
		ccj[kk] = 2*CoefJ[j][k]*CoefJ[j][kp]/((k==kp)?2:1);
		mnmn[kk] = 0.0;
		kk++;
	}
	scal = 0.01;
	delta = (gint)(N*(N+1.0)/2.0*scal); /* progress-report period */
	if(delta<1) delta = N*(N+1)/20;
	if(delta<1) delta = 1;
	pos = delta;
	/* printf("delta = %ld\n",delta);*/
	progress_orb_txt(0,_("Computing of 2 centers Coulomb integrals... Please wait"),TRUE);
	/* Diagonal pass: (kk|kk) contributions, also fills the Schwarz bounds. */
#ifdef ENABLE_OMP
#ifdef G_OS_WIN32
	setTextInProgress(_("Computing of eri, please wait..."));
#endif
#pragma omp parallel for private(k,kp,kk,eri) reduction(+:v,nAll,nComp,pos)
#endif
	for(kk=0;kk<N;kk++)
	{
		k = p[kk];
		kp = q[kk];
		eri = ERICTABLES(k,kp,k,kp,Ttables);
		v += (cci[kk]*ccj[kk])*eri;
		mnmn[kk] = sqrt(fabs(eri));
		nAll++;
		nComp++;
		if(nAll>=pos)
		{
			pos += delta;
#ifdef ENABLE_OMP
#ifndef G_OS_WIN32
#pragma omp critical
			progress_orb_txt(scal,tmp,FALSE);
#endif
#else
			progress_orb_txt(scal,tmp,FALSE);
#endif
		}
	}
	/* Off-diagonal pass (ll<kk) with Schwarz screening. */
#ifdef ENABLE_OMP
#ifdef G_OS_WIN32
	setTextInProgress(_("Computing of eri, please wait..."));
#endif
#pragma omp parallel for private(k,kp,l,lp,kk,ll,eri,cc,ccmn) reduction(+:v,nAll,nComp,pos)
#endif
	for(kk=0;kk<N;kk++)
	{
		k = p[kk];
		kp = q[kk];
		if(!CancelCalcul)
		for(ll=0;ll<kk;ll++)
		{
			if(!CancelCalcul)
			{
				l = p[ll];
				lp = q[ll];
				nAll++;
				if(nAll>=pos)
				{
					pos += delta;
#ifdef ENABLE_OMP
#ifndef G_OS_WIN32
#pragma omp critical
					progress_orb_txt(scal,tmp,FALSE);
#endif
#else
					progress_orb_txt(scal,tmp,FALSE);
#endif
				}
				cc = (cci[kk]*ccj[ll]+cci[ll]*ccj[kk]);
				/* Schwarz screening */
				ccmn = cc*mnmn[kk]*mnmn[ll];
				if(fabs(ccmn)<schwarzCutOff)
				{
					continue;
				}
				eri = ERICTABLES(k,kp,l,lp,Ttables);
				v += cc*eri;
				nComp++;
			}
		}
	}
	/* NOTE(review): nAll/nComp are gulong; %ld works on common ABIs but
	   %lu would be the strictly correct specifier. */
	sprintf(tmp,_("# of all ERI = %ld, # of computed ERI = %ld"),nAll, nComp);
	freeTTables(NAOrb,Ttables);
	progress_orb_txt(0,tmp,TRUE);
	g_free(p);
	g_free(q);
	g_free(cci);
	g_free(ccj);
	g_free(mnmn);
	if(CancelCalcul) return -1.0;
	return v;
}
/********************************************************************************/
/*
** Returns a newly allocated array (caller frees with g_free) holding the
** 0-based row indices of the orbitals currently selected in gtklist;
** *n receives the count.  Returns NULL (and *n == 0) when nothing is
** selected or gtklist is not a tree view.
*/
static gint* get_num_of_selected_orbitals(GtkWidget *gtklist, gint* n)
{
	gint* numOrbs = NULL;
	*n = 0;
	if (gtklist == NULL) return NULL;
	if(!GTK_IS_TREE_VIEW(gtklist)) return NULL;
	{
		GtkTreeSelection *selection;
		GtkTreeModel *model;
		GList *selected_rows = NULL;
		GList *row;
		GtkTreePath *path = NULL;
		gint* indices = NULL;
		gint i = 0;
		selection = gtk_tree_view_get_selection (GTK_TREE_VIEW(gtklist));
		/* BUG FIX: the original called gtk_tree_selection_count_selected_rows
		   on a possibly-NULL selection. */
		if(!selection) return NULL;
		selected_rows = gtk_tree_selection_get_selected_rows (selection, &model);
		*n = gtk_tree_selection_count_selected_rows(selection);
		if(*n>=1)
		{
			numOrbs = g_malloc(*n*sizeof(gint));
			i = 0;
			for (row = g_list_first (selected_rows); row != NULL; row = g_list_next (row))
			{
				path = (GtkTreePath *)(row->data);
				indices = gtk_tree_path_get_indices(path);
				numOrbs[i++] = indices[0];
				if(i>=*n) break;
			}
		}
		/* BUG FIX: the returned list owns its GtkTreePath elements; the
		   original leaked both the paths and the list itself. */
		g_list_foreach (selected_rows, (GFunc) gtk_tree_path_free, NULL);
		g_list_free (selected_rows);
	}
	return numOrbs;
}
/********************************************************************************/
/*
** Toggle handler for the "numerical computing" check button: the grid frame
** is only meaningful for the numeric method, the Schwarz-cutoff widgets only
** for the analytic one, so their sensitivities are mutually exclusive.
*/
static void numeriButtonClicked(GtkWidget *numericButton,gpointer data)
{
	gboolean numericOn = GTK_TOGGLE_BUTTON (numericButton)->active;
	GtkWidget* frameGrid = g_object_get_data (G_OBJECT (numericButton), "FrameGrid");
	GtkWidget* labelSchwarz = g_object_get_data (G_OBJECT (numericButton), "LabelSchwarz");
	GtkWidget* entrySchwarz = g_object_get_data (G_OBJECT (numericButton), "EntrySchwarz");
	if(GTK_IS_WIDGET(frameGrid)) gtk_widget_set_sensitive(frameGrid, numericOn);
	if(GTK_IS_WIDGET(labelSchwarz)) gtk_widget_set_sensitive(labelSchwarz, !numericOn);
	if(GTK_IS_WIDGET(entrySchwarz)) gtk_widget_set_sensitive(entrySchwarz, !numericOn);
}
/********************************************************************************/
/*
** OK-button callback for the Coulomb-integral dialog.
** Reads the method choice (numeric grid vs analytic with Schwarz cutoff),
** validates the grid entries when numeric, collects the selected alpha/beta
** orbitals, computes <ii|1/r12|jj> for each selected orbital pair (alpha-alpha,
** beta-beta, alpha-beta) together with norms and overlap, and shows the
** accumulated report in a message window.
** Side effects (numeric path only): overwrites the globals originOfCube,
** firstDirection, secondDirection, thirdDirection, NumPoints and limits.
*/
static void apply_coulomb_orbitals(GtkWidget *Win,gpointer data)
{
GtkWidget** entriestmp = NULL;
G_CONST_RETURN gchar* temp;
gchar* dump;
gint i;
gint j;
GridLimits limitstmp;
gint NumPointstmp[3];
GtkWidget *entries[3][6];
gdouble V[3][3];
GtkWidget* alphaList = g_object_get_data (G_OBJECT (Win), "AlphaList");
GtkWidget* betaList = g_object_get_data (G_OBJECT (Win), "BetaList");
GtkWidget* numericButton = g_object_get_data (G_OBJECT (Win), "NumericButton");
GtkWidget* entrySchwarz = g_object_get_data (G_OBJECT (Win), "EntrySchwarz");
gint* numAlphaOrbs = NULL;
gint* numBetaOrbs = NULL;
gint nAlpha = 0;
gint nBeta = 0;
gdouble integ, normi, normj, overlap;
gchar* result = NULL;
FuncCompCoulomb compute_coulomb = compute_coulomb_integrale_iijj_poisson;
gboolean numeric = FALSE;
gdouble schwarzCutOff = 1e-8;
if(GTK_IS_WIDGET(Win))
{
entriestmp = (GtkWidget **)g_object_get_data(G_OBJECT (Win), "Entries");
}
else return;
if(entriestmp==NULL) return;
if(!GTK_IS_WIDGET(numericButton)) return;
numeric = GTK_TOGGLE_BUTTON (numericButton)->active;
if(!numeric)
{
/* analytic method: the Schwarz cutoff comes from the dialog entry */
if(!GTK_IS_WIDGET(entrySchwarz)) return;
schwarzCutOff = atof(gtk_entry_get_text(GTK_ENTRY(entrySchwarz)));
}
destroy_win_list();
if(numeric)
{
/* Validate and copy the 3x6 grid entries:
   columns 0-2 = direction vector, 3-4 = min/max, 5 = number of points. */
for(i=0;i<3;i++)
for(j=0;j<6;j++)
entries[i][j] = entriestmp[i*6+j];
for(i=0;i<3;i++)
{
for(j=3;j<5;j++)
{
temp = gtk_entry_get_text(GTK_ENTRY(entries[i][j]));
dump = NULL;
if(temp && strlen(temp)>0)
{
dump = g_strdup(temp);
delete_first_spaces(dump);
delete_last_spaces(dump);
}
if(dump && strlen(dump)>0 && this_is_a_real(dump))
{
limitstmp.MinMax[j-3][i] = atof(dump);
}
else
{
/* NOTE(review): dump is not freed on this error path. */
GtkWidget* message = Message(_("Error : an entry is not a float "),_("Error"),TRUE);
gtk_window_set_modal (GTK_WINDOW (message), TRUE);
return;
}
if(dump) g_free(dump);
}
temp = gtk_entry_get_text(GTK_ENTRY(entries[i][5]));
NumPointstmp[i] = atoi(temp);
if(NumPointstmp[i] <=2)
{
GtkWidget* message = Message(_("Error : The number of points should be > 2. "),_("Error"),TRUE);
gtk_window_set_modal (GTK_WINDOW (message), TRUE);
return;
}
}
for(i=0;i<3;i++)
{
if( limitstmp.MinMax[0][i]> limitstmp.MinMax[1][i])
{
GtkWidget* message = Message(_("Error : The minimal value should be smaller than the maximal value "),_("Error"),TRUE);
gtk_window_set_modal (GTK_WINDOW (message), TRUE);
return;
}
}
/* Read the three direction vectors. */
for(i=0;i<3;i++)
{
for(j=0;j<3;j++)
{
V[i][j] = 0;
temp = gtk_entry_get_text(GTK_ENTRY(entries[i][j]));
dump = NULL;
if(temp && strlen(temp)>0)
{
dump = g_strdup(temp);
delete_first_spaces(dump);
delete_last_spaces(dump);
}
if(dump && strlen(dump)>0 && this_is_a_real(dump))
{
V[i][j] = atof(dump);
}
else
{
/* NOTE(review): dump is not freed on this error path. */
GtkWidget* message = Message(_("Error : an entry is not a float "),_("Error"),TRUE);
gtk_window_set_modal (GTK_WINDOW (message), TRUE);
return;
}
if(dump) g_free(dump);
}
}
/* Normalize each direction vector; reject null vectors. */
for(i=0;i<3;i++)
{
gdouble norm = 0.0;
for(j=0;j<3;j++)
norm += V[i][j]*V[i][j];
if(fabs(norm)<1e-8)
{
GtkWidget* message = Message(_("Error : the norm is equal to 0 "),_("Error"),TRUE);
gtk_window_set_modal (GTK_WINDOW (message), TRUE);
return;
}
for(j=0;j<3;j++)
V[i][j] /= sqrt(norm);
}
/* Commit the validated grid description to the globals. */
for(j=0;j<3;j++) originOfCube[j] = 0;
for(j=0;j<3;j++) firstDirection[j] = V[0][j];
for(j=0;j<3;j++) secondDirection[j] = V[1][j];
for(j=0;j<3;j++) thirdDirection[j] = V[2][j];
for(i=0;i<3;i++)
{
NumPoints[i] =NumPointstmp[i] ;
for(j=0;j<2;j++)
limits.MinMax[j][i] =limitstmp.MinMax[j][i];
}
} /* end if numeric */
CancelCalcul = FALSE;
/* printf("DirName = %s\n",dirName);*/
numAlphaOrbs = get_num_of_selected_orbitals(alphaList, &nAlpha);
numBetaOrbs = get_num_of_selected_orbitals(betaList, &nBeta);
if(nAlpha+nBeta<1)
{
GtkWidget* message = Message(_("Error : You should select at last one orbital"),_("Error"),TRUE);
gtk_window_set_modal (GTK_WINDOW (message), TRUE);
return;
}
else if(nAlpha+nBeta==1)
{
/* Single orbital selected: compute <ii|1/r12|ii> and the norm only. */
gint i = -1;
gint typeOrb = -1;
delete_child(Win);
if(nAlpha==1 && numAlphaOrbs)
{
typeOrb = 1;
i = numAlphaOrbs[0];
}
else if(nBeta==1 && numBetaOrbs)
{
typeOrb = 2;
i = numBetaOrbs[0];
}
if(i>-1 && typeOrb>0)
{
gint ii = i+1;
if(numeric)
{
if(compute_coulomb(
NumPoints,limits,
typeOrb, i, typeOrb, i,
&integ, &normi, &normj, &overlap)
)
result = g_strdup_printf(
"<%d|%d> = %lf\n"
"<%d %d|1/r12|%d %d> = %0.12lf Hartree\n",
ii,ii,normi,
ii,ii,ii,ii,integ);
else
result = g_strdup_printf("Canceled? !\n If not see your terminal ");
}
else
{
setTextInProgress(_("Analytic computing of coulomb integral"));
integ = get_coulomb_analytic(typeOrb, i, typeOrb, i, schwarzCutOff);
normi = get_overlap_analytic(typeOrb, i, typeOrb, i);
result = g_strdup_printf(
"<%d|%d> = %lf\n"
"<%d %d|1/r12|%d %d> = %0.12lf Hartree\n",
ii,ii,normi,
ii,ii,ii,ii,integ);
}
}
}
else
{
/* Several orbitals: loop over alpha-alpha, then beta-beta, then
   alpha-beta pairs; each pair appends one report paragraph to result. */
gint typeOrbi = 1;
gint typeOrbj = 1;
delete_child(Win);
if(numAlphaOrbs)
for(i=0;i<nAlpha;i++)
for(j=i+1;j<nAlpha;j++)
{
gchar* tmp = NULL;
gint ii = numAlphaOrbs[i];
gint jj = numAlphaOrbs[j];
if(CancelCalcul) break;
if(numeric && compute_coulomb(
NumPoints,limits,
typeOrbi, ii, typeOrbj, jj,
&integ, &normi, &normj, &overlap)
)
{
ii++;
jj++;
tmp = g_strdup_printf(
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d %d|1/r12|%d %d> = %0.12lf Hartree\n",
ii,ii,normi,
jj,jj,normj,
ii,jj,overlap,
ii,ii,jj,jj,
integ);
}
else if(!numeric)
{
setTextInProgress(_("Analytic computing of coulomb integral"));
integ = get_coulomb_analytic(typeOrbi, ii, typeOrbj, jj, schwarzCutOff);
normi = get_overlap_analytic(typeOrbi, ii, typeOrbi, ii);
normj = get_overlap_analytic(typeOrbj, jj, typeOrbj, jj);
overlap = get_overlap_analytic(typeOrbi, ii, typeOrbj, jj);
ii++;
jj++;
tmp = g_strdup_printf(
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d %d|1/r12|%d %d> = %0.12lf Hartree\n",
ii,ii,normi,
jj,jj,normj,
ii,jj,overlap,
ii,ii,jj,jj,
integ);
}
if(tmp)
{
/* NOTE(review): tmp is never g_free'd after being copied into
   result - one small leak per computed pair (same in the two
   loops below). */
gchar* old = result;
if(old)
{
result = g_strdup_printf("%s%s",old,tmp);
g_free(old);
}
else result = g_strdup_printf("%s",tmp);
}
}
typeOrbi = 2;
typeOrbj = 2;
if(numBetaOrbs)
for(i=0;i<nBeta;i++)
for(j=i+1;j<nBeta;j++)
{
gchar* tmp = NULL;
gint ii = numBetaOrbs[i];
gint jj = numBetaOrbs[j];
if(CancelCalcul) break;
if(numeric && compute_coulomb(
NumPoints,limits,
typeOrbi, ii, typeOrbj, jj,
&integ, &normi, &normj, &overlap)
)
{
ii++;
jj++;
tmp = g_strdup_printf(
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d %d|1/r12|%d %d> = %0.12lf Hartree\n",
ii,ii,normi,
jj,jj,normj,
ii,jj,overlap,
ii,ii,jj,jj,
integ);
}
else if(!numeric)
{
setTextInProgress(_("Analytic computing of coulomb integral"));
integ = get_coulomb_analytic(typeOrbi, ii, typeOrbj, jj, schwarzCutOff);
normi = get_overlap_analytic(typeOrbi, ii, typeOrbi, ii);
normj = get_overlap_analytic(typeOrbj, jj, typeOrbj, jj);
overlap = get_overlap_analytic(typeOrbi, ii, typeOrbj, jj);
ii++;
jj++;
tmp = g_strdup_printf(
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d %d|1/r12|%d %d> = %0.12lf Hartree\n",
ii,ii,normi,
jj,jj,normj,
ii,jj,overlap,
ii,ii,jj,jj,
integ);
}
if(tmp)
{
gchar* old = result;
if(old)
{
result = g_strdup_printf("%s%s",old,tmp);
g_free(old);
}
else result = g_strdup_printf("%s",tmp);
}
}
typeOrbi = 1;
typeOrbj = 2;
if(numAlphaOrbs && numBetaOrbs)
for(i=0;i<nAlpha;i++)
for(j=0;j<nBeta;j++)
{
gchar* tmp = NULL;
gint ii = numAlphaOrbs[i];
gint jj = numBetaOrbs[j];
if(CancelCalcul) break;
if(numeric && compute_coulomb(
NumPoints,limits,
typeOrbi, ii, typeOrbj, jj,
&integ, &normi, &normj, &overlap)
)
{
ii++;
jj++;
tmp = g_strdup_printf(
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d %d|1/r12|%d %d> = %0.12lf Hartree\n",
ii,ii,normi,
jj,jj,normj,
ii,jj,overlap,
ii,ii,jj,jj,
integ);
}
else if(!numeric)
{
setTextInProgress(_("Analytic computing of coulomb integral"));
integ = get_coulomb_analytic(typeOrbi, ii, typeOrbj, jj, schwarzCutOff);
normi = get_overlap_analytic(typeOrbi, ii, typeOrbi, ii);
normj = get_overlap_analytic(typeOrbj, jj, typeOrbj, jj);
overlap = get_overlap_analytic(typeOrbi, ii, typeOrbj, jj);
ii++;
jj++;
tmp = g_strdup_printf(
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d %d|1/r12|%d %d> = %0.12lf Hartree\n",
ii,ii,normi,
jj,jj,normj,
ii,jj,overlap,
ii,ii,jj,jj,
integ);
}
if(tmp)
{
gchar* old = result;
if(old)
{
result = g_strdup_printf("%s%s",old,tmp);
g_free(old);
}
else result = g_strdup_printf("%s",tmp);
}
}
}
if(result && !CancelCalcul)
{
GtkWidget* message = MessageTxt(result,_("Result"));
gtk_window_set_modal (GTK_WINDOW (message), TRUE);
gtk_window_set_transient_for(GTK_WINDOW(message),GTK_WINDOW(PrincipalWindow));
}
/*
printf("Selected alpha orbitals : ");
for(i=0;i<nAlpha;i++)
printf("%d ",numAlphaOrbs[i]);
printf("\n");
printf("Selected beta orbitals : ");
for(i=0;i<nBeta;i++)
printf("%d ",numBetaOrbs[i]);
printf("\n");
*/
set_label_title(NULL,0,0);
if(numAlphaOrbs) g_free(numAlphaOrbs);
if(numBetaOrbs) g_free(numBetaOrbs);
if(CancelCalcul) CancelCalcul = FALSE;
}
/********************************************************************************/
/*
** Selects (and scrolls to) the given 0-based row of a GtkTreeView list.
** Does nothing when list is NULL or row yields no valid path.
*/
static void select_row(GtkWidget* list, gint row)
{
	GtkTreePath *path;
	gchar* tmp;
	/* BUG FIX: guard before building the path; the original allocated the
	   path first and leaked it when list == NULL. */
	if(!list) return;
	tmp = g_strdup_printf("%d",row);
	path = gtk_tree_path_new_from_string (tmp);
	g_free(tmp);
	if(!path) return; /* e.g. a negative row produces no valid path */
	gtk_tree_selection_select_path (gtk_tree_view_get_selection (GTK_TREE_VIEW (list)), path);
	gtk_tree_view_scroll_to_cell(GTK_TREE_VIEW (list), path, NULL, FALSE,0.5,0.5);
	gtk_tree_path_free(path);
}
/********************************************************************************/
/*
** Builds a 4-column (Nr, Energy, Occ., Sym.) multi-select tree view listing
** N orbitals.  Rows whose symmetry label is "DELETED" are skipped.
** *widall receives a suggested pixel width for the view.
*/
static GtkWidget* new_gtk_list_orbitals(gint N,gdouble* Energies,gdouble* Occ,gchar** sym, gint* widall)
{
	gint row;
	gint col;
	GtkWidget* view = NULL;
	gint* colWidth = NULL;
	gint nCols = 4;
	gchar* Titles[] = {"Nr","Energy","Occ.","Sym."};
	gint widthFactor[] = {10,12,8,14}; /* per-column width multipliers */
	gchar* cell[4];
	GtkListStore *store;
	GtkTreeModel *model;
	GtkCellRenderer *renderer;
	GtkTreeViewColumn *column;
	GtkTreeSelection *select;
	GtkTreeIter iter;
	GType* types;
	/* column widths: title length scaled by a per-column factor */
	colWidth = g_malloc(nCols*sizeof(gint));
	for (col=0;col<nCols;col++)
		colWidth[col] = (gint)(strlen(Titles[col])*widthFactor[col]);
	/* all columns are rendered as strings */
	types = g_malloc(nCols*sizeof(GType));
	for (col=0;col<nCols;col++) types[col] = G_TYPE_STRING;
	store = gtk_list_store_newv (nCols, types);
	g_free(types);
	model = GTK_TREE_MODEL (store);
	*widall = 60;
	for (col=0;col<nCols;col++) *widall += colWidth[col];
	view = gtk_tree_view_new_with_model (model);
	gtk_tree_view_set_rules_hint (GTK_TREE_VIEW (view), TRUE);
	gtk_tree_view_set_headers_visible (GTK_TREE_VIEW (view), TRUE);
	gtk_tree_view_set_reorderable(GTK_TREE_VIEW (view), FALSE);
	for (col=0;col<nCols;col++)
	{
		column = gtk_tree_view_column_new ();
		gtk_tree_view_column_set_title (column, Titles[col]);
		renderer = gtk_cell_renderer_text_new ();
		gtk_tree_view_column_pack_start (column, renderer, TRUE);
		gtk_tree_view_column_set_min_width(column, colWidth[col]);
		gtk_tree_view_column_set_attributes (column, renderer, "text", col, NULL);
		gtk_tree_view_append_column (GTK_TREE_VIEW (view), column);
	}
	g_free(colWidth);
	select = gtk_tree_view_get_selection (GTK_TREE_VIEW (view));
	gtk_tree_selection_set_mode (select, GTK_SELECTION_MULTIPLE);
	for(row=0;row<N;row++)
	{
		if(strcmp(sym[row],"DELETED")==0) continue; /* hidden orbital */
		cell[0] = g_strdup_printf("%i",row+1);
		cell[1] = g_strdup_printf("%lf",Energies[row]);
		cell[2] = g_strdup_printf("%lf",Occ[row]);
		cell[3] = g_strdup(sym[row]);
		gtk_list_store_append(store, &iter);
		for(col=0;col<4;col++) gtk_list_store_set (store, &iter, col, cell[col], -1);
		for(col=0;col<4;col++) g_free(cell[col]);
	}
	return view;
}
/********************************************************************************/
/*
** Builds the "Alpha Orbitals" frame: a scrolled multi-select list of the
** alpha MOs (index, energy, occupation, symmetry) packed into hboxall.
** The list widget is stored on hboxall under the key "AlphaList".
*/
static GtkWidget* new_alpha_list(GtkWidget *hboxall)
{
	GtkWidget *frame;
	GtkWidget *scr;
	GtkWidget *vbox;
	GtkWidget *gtklist;
	gint i;
	gint N;
	gdouble* Energies;
	gdouble* Occ;
	gchar** sym;
	static gint type = 1; /* address is stored on the widget => must be static */
	gint widall = 0;
	N = NAlphaOrb;
	Energies = g_malloc(N*sizeof(gdouble));
	Occ = g_malloc(N*sizeof(gdouble));
	sym = g_malloc(N*sizeof(gchar*));
	for(i=0;i<N;i++)
	{
		Energies[i] = EnerAlphaOrbitals[i];
		Occ[i] = OccAlphaOrbitals[i];
		sym[i] = g_strdup(SymAlphaOrbitals[i]);
	}
	gtklist = new_gtk_list_orbitals(N,Energies,Occ,sym,&widall);
	g_object_set_data(G_OBJECT (gtklist), "Type",&type);
	frame = gtk_frame_new (_("Alpha Orbitals"));
	gtk_container_set_border_width (GTK_CONTAINER (frame), 1);
	gtk_frame_set_shadow_type( GTK_FRAME(frame),GTK_SHADOW_ETCHED_OUT);
	gtk_box_pack_start (GTK_BOX (hboxall), frame, TRUE, TRUE, 0);
	gtk_widget_show (frame);
	vbox = create_vbox(frame);
	scr=gtk_scrolled_window_new(NULL,NULL);
	gtk_widget_set_size_request(scr,widall,(gint)(ScreenHeight*WIDTHSCR));
	gtk_scrolled_window_set_policy (GTK_SCROLLED_WINDOW (scr),GTK_POLICY_AUTOMATIC, GTK_POLICY_AUTOMATIC);
	gtk_box_pack_start(GTK_BOX (vbox), scr,TRUE, TRUE, 1);
	gtk_container_add(GTK_CONTAINER(scr),gtklist);
	set_base_style(gtklist,55000,55000,55000);
	/* CONSISTENCY FIX: show the scrolled window and the list here, exactly
	   as new_beta_list does (the original relied on a later show_all). */
	gtk_widget_show (scr);
	gtk_widget_show (gtklist);
	for(i=0;i<N;i++) g_free(sym[i]);
	g_free(Energies);
	g_free(Occ);
	g_free(sym);
	g_object_set_data(G_OBJECT (hboxall), "AlphaList",gtklist);
	return frame;
}
/********************************************************************************/
/*
** Builds the "Beta Orbitals" frame: a scrolled multi-select list of the
** beta MOs (index, energy, occupation, symmetry) packed into hboxall.
** The list widget is stored on hboxall under the key "BetaList".
*/
static GtkWidget* new_beta_list(GtkWidget *hboxall)
{
	static gint type = 2; /* address is stored on the widget => must be static */
	GtkWidget *frame;
	GtkWidget *scroll;
	GtkWidget *box;
	GtkWidget *list;
	gint idx;
	gint count = NBetaOrb;
	gint totalWidth = 0;
	gdouble* ener = g_malloc(count*sizeof(gdouble));
	gdouble* occ = g_malloc(count*sizeof(gdouble));
	gchar** labels = g_malloc(count*sizeof(gchar*));
	/* snapshot the beta-orbital data for the list builder */
	for(idx=0;idx<count;idx++)
	{
		ener[idx] = EnerBetaOrbitals[idx];
		occ[idx] = OccBetaOrbitals[idx];
		labels[idx] = g_strdup(SymBetaOrbitals[idx]);
	}
	list = new_gtk_list_orbitals(count,ener,occ,labels,&totalWidth);
	g_object_set_data(G_OBJECT (list), "Type",&type);
	frame = gtk_frame_new (_("Beta Orbitals"));
	gtk_container_set_border_width (GTK_CONTAINER (frame), 1);
	gtk_frame_set_shadow_type( GTK_FRAME(frame),GTK_SHADOW_ETCHED_OUT);
	gtk_box_pack_start (GTK_BOX (hboxall), frame, TRUE, TRUE, 0);
	gtk_widget_show (frame);
	box = create_vbox(frame);
	scroll = gtk_scrolled_window_new(NULL,NULL);
	gtk_scrolled_window_set_policy (GTK_SCROLLED_WINDOW (scroll),GTK_POLICY_AUTOMATIC, GTK_POLICY_AUTOMATIC);
	gtk_widget_set_size_request(scroll,totalWidth,(gint)(ScreenHeight*WIDTHSCR));
	gtk_box_pack_start(GTK_BOX (box), scroll,TRUE, TRUE, 1);
	gtk_container_add(GTK_CONTAINER(scroll),list);
	set_base_style(list,55000,55000,55000);
	gtk_widget_show (scroll);
	gtk_widget_show (list);
	for(idx=0;idx<count;idx++) g_free(labels[idx]);
	g_free(ener);
	g_free(occ);
	g_free(labels);
	g_object_set_data(G_OBJECT (hboxall), "BetaList",list);
	return frame;
}
/********************************************************************************/
/*
** Packs the alpha and beta orbital list frames side by side into vboxall.
** The returned hbox carries the "AlphaList"/"BetaList" data keys set by the
** two builders.
*/
static GtkWidget *create_orbitals_list( GtkWidget *vboxall)
{
	GtkWidget *hbox = gtk_hbox_new (TRUE, 0);
	gtk_box_pack_start (GTK_BOX (vboxall), hbox, TRUE, TRUE, 0);
	new_alpha_list(hbox);
	new_beta_list(hbox);
	return hbox;
}
/********************************************************************************/
/*
** Opens the "Coulomb energy <ii|1/r12|jj>" dialog: orbital selection lists,
** a numeric/analytic method switch, the Schwarz-cutoff entry and the grid
** frame.  OK triggers apply_coulomb_orbitals.  Preselects the HOMO and LUMO
** rows in the alpha list.  Requires a loaded geometry and MOs.
*/
void coulomb_orbitals_dlg()
{
	GtkWidget *Win;
	GtkWidget *frameGrid;
	GtkWidget *frameMethod;
	GtkWidget *alphaList;
	GtkWidget *betaList;
	GtkWidget *hbox;
	GtkWidget *vboxall;
	GtkWidget *vboxwin;
	GtkWidget *button;
	GtkWidget *label;
	GtkWidget** entries;
	GtkWidget* numericButton = NULL;
	GtkWidget* vbox = NULL;
	GtkWidget* entrySchwarz = NULL;
	GtkWidget* table = NULL;
	/* preconditions: geometry, MO coefficients and an AO basis must be loaded */
	if(!GeomOrb)
	{
		Message(_("Sorry, Please load a file before\n"),_("Error"),TRUE);
		return;
	}
	if(!CoefAlphaOrbitals)
	{
		Message(_("Sorry, Please load the MO before\n"),_("Error"),TRUE);
		return;
	}
	if(!AOrb && !SAOrb)
	{
		Message(_("Sorry, Please load the MO before\n"),_("Error"),TRUE);
		return;
	}
	if(!AOAvailable &&(TypeGrid == GABEDIT_TYPEGRID_DDENSITY || TypeGrid == GABEDIT_TYPEGRID_ADENSITY))
	{
		Message(_("Sorry, No atomic orbitals available.\nPlease use a gabedit file for load : \n"
		"Geometry, Molecular and Atomic Orbitals\n"),_("Error"),TRUE);
		return;
	}
	Win = gtk_window_new(GTK_WINDOW_TOPLEVEL);
	/* BUG FIX: title typo "Comlomb" -> "Coulomb" */
	gtk_window_set_title(GTK_WINDOW(Win),"Coulomb energy <ii|1/r12|jj>");
	gtk_window_set_position(GTK_WINDOW(Win),GTK_WIN_POS_CENTER);
	gtk_container_set_border_width (GTK_CONTAINER (Win), 5);
	gtk_window_set_transient_for(GTK_WINDOW(Win),GTK_WINDOW(PrincipalWindow));
	gtk_window_set_modal (GTK_WINDOW (Win), TRUE);
	add_glarea_child(Win,"Grid ");
	vboxall = create_vbox(Win);
	vboxwin = vboxall;
	hbox = gtk_hbox_new (TRUE, 0);
	gtk_box_pack_start (GTK_BOX (vboxall), hbox, TRUE, TRUE, 0);
	label = gtk_label_new("");
	gtk_label_set_markup(GTK_LABEL(label), "<span foreground=\"#FF0000\"><big>Use mouse + the Ctrl key (or the shift key) to select several orbitals</big></span>\n");
	gtk_box_pack_start (GTK_BOX (hbox), label, TRUE, TRUE, 0);
	/* orbital selection lists; their widgets are re-exposed on Win for the
	   OK callback */
	hbox = create_orbitals_list(vboxall);
	alphaList = g_object_get_data (G_OBJECT (hbox), "AlphaList");
	g_object_set_data (G_OBJECT (Win), "AlphaList",alphaList);
	betaList = g_object_get_data (G_OBJECT (hbox), "BetaList");
	g_object_set_data (G_OBJECT (Win), "BetaList",betaList);
	gtk_box_pack_start (GTK_BOX (vboxall), gtk_hseparator_new(), TRUE, TRUE, 5);
	frameMethod = gtk_frame_new(_("Method"));
	gtk_box_pack_start (GTK_BOX (vboxall), frameMethod, TRUE, TRUE, 2);
	vbox = create_vbox(frameMethod);
	gtk_widget_show_all (vbox);
	table = gtk_table_new(2,2,FALSE);
	gtk_container_add(GTK_CONTAINER(vbox),table);
	gtk_widget_show (table);
	numericButton = gtk_check_button_new_with_label (
	_("Numerical computing of the Coulomb integral (Large box is recommended)"));
	gtk_table_attach(GTK_TABLE(table),numericButton,0,0+2,0,0+1,
	(GtkAttachOptions)(GTK_FILL | GTK_EXPAND),
	(GtkAttachOptions)(GTK_FILL | GTK_SHRINK),
	1,1);
	g_signal_connect(G_OBJECT(numericButton), "clicked",(GCallback)numeriButtonClicked,NULL);
	g_object_set_data (G_OBJECT (Win), "NumericButton",numericButton);
	label = gtk_label_new(_(" Schwarz cutoff : "));
	gtk_table_attach(GTK_TABLE(table),label,0,0+1,1,1+1,
	(GtkAttachOptions)(GTK_FILL | GTK_SHRINK),
	(GtkAttachOptions)(GTK_FILL | GTK_SHRINK),
	1,1);
	g_object_set_data (G_OBJECT (Win), "LabelSchwarz",label);
	g_object_set_data (G_OBJECT (numericButton), "LabelSchwarz",label);
	entrySchwarz = gtk_entry_new();
	gtk_entry_set_text(GTK_ENTRY(entrySchwarz),"1e-8");
	gtk_table_attach(GTK_TABLE(table),entrySchwarz,1,1+1,1,1+1,
	(GtkAttachOptions)(GTK_FILL | GTK_EXPAND),
	(GtkAttachOptions)(GTK_FILL | GTK_SHRINK),
	1,1);
	g_object_set_data (G_OBJECT (Win), "EntrySchwarz",entrySchwarz);
	g_object_set_data (G_OBJECT (numericButton), "EntrySchwarz",entrySchwarz);
	frameGrid = create_grid_frame(vboxall,"Box & Grid");
	entries = (GtkWidget**) g_object_get_data (G_OBJECT (frameGrid), "Entries");
	g_object_set_data (G_OBJECT (Win), "Entries",entries);
	g_object_set_data (G_OBJECT (Win), "FrameGrid",frameGrid);
	g_object_set_data (G_OBJECT (numericButton), "FrameGrid",frameGrid);
	gtk_widget_set_sensitive(frameGrid, GTK_TOGGLE_BUTTON (numericButton)->active);
	/* Slater basis: the analytic path is unavailable, force numeric mode */
	if(!AOrb && SAOrb)
	{
		gtk_button_clicked (GTK_BUTTON (numericButton));
		gtk_widget_set_sensitive(numericButton, FALSE);
	}
	hbox = create_hbox_false(vboxwin);
	gtk_widget_realize(Win);
	button = create_button(Win,_("OK"));
	gtk_box_pack_end (GTK_BOX( hbox), button, FALSE, TRUE, 3);
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_widget_grab_default(button);
	gtk_widget_show (button);
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)apply_coulomb_orbitals,G_OBJECT(Win));
	button = create_button(Win,_("Cancel"));
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_box_pack_end (GTK_BOX( hbox), button, FALSE, TRUE, 3);
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)delete_child, G_OBJECT(Win));
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)gtk_widget_destroy,G_OBJECT(Win));
	gtk_widget_show (button);
	gtk_widget_show_all (Win);
	/* preselect HOMO (and LUMO when it exists) in the alpha list */
	if(NAlphaOcc-1>=0)
	{
		select_row(alphaList,NAlphaOcc-1);
		if(NAlphaOcc+1<=NOrb) select_row(alphaList,NAlphaOcc);
	}
	else
	{
		select_row(alphaList,0);
		if(2<=NOrb) select_row(alphaList,1);
	}
}
/********************************************************************************/
/*
** Computes the lower-triangular overlap matrix <i|j> between all molecular
** orbitals of one spin (typeOrb == 1 -> alpha, otherwise beta), expanded on
** the contracted-Gaussian AO basis AOrb, and shows it as text.
** Progress is reported; the user can cancel via CancelCalcul.
*/
void compute_overlap_matrix(gint typeOrb)
{
	gint i,j,k,l;
	gchar* result = NULL;
	gchar* pDest = NULL;
	gdouble** matrix = NULL;
	gdouble** CoefI = CoefAlphaOrbitals;
	gdouble** CoefJ = CoefAlphaOrbitals;
	gdouble o;
	gint nAll = 0;
	gint delta = 0;
	gint pos = 0;
	gdouble scal;
	gchar str[BSIZE];
	if(typeOrb != 1)
	{
		CoefI = CoefBetaOrbitals;
		CoefJ = CoefBetaOrbitals;
	}
	if(NAOrb<1)
	{
		GtkWidget* message = Message(_("Error : You should read orbitals"),_("Error"),TRUE);
		gtk_window_set_modal (GTK_WINDOW (message), TRUE);
		return;
	}
	if(!AOrb && !SAOrb)
	{
		GtkWidget* message = Message(_("Sorry, Please load the MO before\n"),_("Error"),TRUE);
		gtk_window_set_modal (GTK_WINDOW (message), TRUE);
		return;
	}
	if(!AOrb && SAOrb)
	{
		GtkWidget* message = Message(_("Sorry, That does not work with Slater basis set\n"),_("Error"),TRUE);
		gtk_window_set_modal (GTK_WINDOW (message), TRUE);
		return;
	}
	destroy_win_list();
	sprintf(str,_("Computing of overlap matrix between orbitals... Please wait"));
	setTextInProgress(str);
	scal = 0.01;
	delta = (gint)(NAOrb*(NAOrb+1)/2*scal); /* progress-report period */
	if(delta<1) delta = 1;
	pos = delta;
	/* lower-triangular MO overlap accumulator */
	matrix = g_malloc(NAOrb*sizeof(gdouble*));
	for(i=0;i<NAOrb;i++)
	{
		matrix[i] = g_malloc((i+1)*sizeof(gdouble));
		for(j=0;j<=i;j++) matrix[i][j] = 0;
	}
	progress_orb_txt(0,str,TRUE);
	/* diagonal AO overlaps */
	for(k=0;k<NAOrb;k++)
	{
		if(CancelCalcul) break;
		o = overlapCGTF(&AOrb[k],&AOrb[k]);
		nAll++;
		/* printf("k=%d o = %lf\n",k,o);*/
		for(i=0;i<NAOrb;i++)
		for(j=0;j<=i;j++)
			matrix[i][j] += CoefI[i][k]*CoefJ[j][k]*o;
		if(nAll>=pos)
		{
			pos += delta;
			progress_orb_txt(scal,str,FALSE);
		}
	}
	/* off-diagonal AO overlaps (k<l pairs) */
	for(k=0;k<NAOrb;k++)
	{
		/* printf("---->k=%d \n",k);*/
		for(l=k+1;l<NAOrb;l++)
		{
			if(CancelCalcul) break;
			o = overlapCGTF(&AOrb[k],&AOrb[l]);
			nAll++;
			for(i=0;i<NAOrb;i++)
			for(j=0;j<=i;j++)
				matrix[i][j] += (CoefI[i][k]*CoefJ[j][l]+CoefI[i][l]*CoefJ[j][k])*o;
			if(nAll>=pos)
			{
				pos += delta;
				progress_orb_txt(scal,str,FALSE);
			}
		}
		if(CancelCalcul) break;
	}
	progress_orb_txt(0," ",TRUE);
	/* 100 chars per matrix entry, plus BSIZE slack for the header line
	   (the original did not account for the header). */
	result = g_malloc((NAOrb*(NAOrb+1)/2*100+BSIZE)*sizeof(gchar));
	if(typeOrb == 1) sprintf(result," Alpha overlap matrix\n");
	else sprintf(result," Beta overlap matrix\n");
	setTextInProgress(_("Preparation of text to show... Please wait"));
	/* PERF FIX: append via a running write pointer instead of repeated
	   strcat over the growing buffer (was accidentally quadratic). */
	pDest = result + strlen(result);
	for(i=0;i<NAOrb;i++)
	{
		for(j=0;j<=i;j++)
		{
			if(CancelCalcul) break;
			pDest += sprintf(pDest,"<%d|%d> = %lf\n",i+1,j+1,matrix[i][j]);
		}
		if(CancelCalcul) break;
	}
	progress_orb_txt(0," ",TRUE);
	if(result && !CancelCalcul)
	{
		GtkWidget* message = MessageTxt(result,_("Overlap matrix"));
		gtk_window_set_modal (GTK_WINDOW (message), TRUE);
		gtk_window_set_transient_for(GTK_WINDOW(message),GTK_WINDOW(PrincipalWindow));
	}
	if(matrix)
	{
		for(i=0;i<NAOrb;i++)
			if(matrix[i]) g_free(matrix[i]);
		g_free(matrix);
	}
	g_free(result);
}
/********************************************************************************/
/*
** Transition dipole <ii|vec r|jj> between orbitals ii (typeOrbi) and jj
** (typeOrbj), computed either numerically on the grid described by N/limits
** or analytically.  On success returns a newly allocated report string
** (caller frees with g_free); the moments and norms are also returned
** through integ[3], pNormi, pNormj and pOverlap.  Returns NULL on failure.
*/
gchar* compute_transition_matrix(gint N[],GridLimits limits, gint typeOrbi, gint ii, gint typeOrbj, gint jj,
gdouble* integ, gdouble* pNormi, gdouble* pNormj, gdouble* pOverlap, gboolean numeric)
{
	gchar* tmp = NULL;
	gdouble m = 0;
	/* BUG FIX: pass the N[] parameter through; the original ignored it and
	   used the global NumPoints, leaving the parameter dead. */
	if(numeric &&
	compute_transition_matrix_numeric( N,limits, typeOrbi, ii, typeOrbj, jj,
	integ, pNormi, pNormj, pOverlap)
	)
	{
		ii++;
		jj++;
		m = sqrt(integ[0]*integ[0]+integ[1]*integ[1]+integ[2]*integ[2]);
		tmp = g_strdup_printf(
		"<%d|%d> = %lf\n"
		"<%d|%d> = %lf\n"
		"<%d|%d> = %lf\n"
		"<%d|vec r|%d> = %lf %lf %lf au (Magnitude=%lf)\n"
		"<%d|vec r|%d> = %lf %lf %lf Debye (Magnitude=%lf)\n\n",
		ii,ii,*pNormi,
		jj,jj,*pNormj,
		ii,jj,*pOverlap,
		ii,jj, integ[0], integ[1], integ[2], m,
		ii,jj, integ[0]*AUTODEB, integ[1]*AUTODEB, integ[2]*AUTODEB, m*AUTODEB
		);
	}
	else if(!numeric)
	{
		setTextInProgress(_("Analytic computing of coulomb integral"));
		compute_transition_matrix_analytic(typeOrbi, ii, typeOrbj, jj, integ);
		*pNormi = get_overlap_analytic(typeOrbi, ii, typeOrbi, ii);
		*pNormj = get_overlap_analytic(typeOrbj, jj, typeOrbj, jj);
		*pOverlap = get_overlap_analytic(typeOrbi, ii, typeOrbj, jj);
		ii++;
		jj++;
		m = sqrt(integ[0]*integ[0]+integ[1]*integ[1]+integ[2]*integ[2]);
		tmp = g_strdup_printf(
		"<%d|%d> = %lf\n"
		"<%d|%d> = %lf\n"
		"<%d|%d> = %lf\n"
		"<%d|vec r|%d> = %lf %lf %lf au (Magnitude=%lf)\n"
		"<%d|vec r|%d> = %lf %lf %lf Debye (Magnitude=%lf)\n\n",
		ii,ii,*pNormi,
		jj,jj,*pNormj,
		ii,jj,*pOverlap,
		ii,jj, integ[0], integ[1], integ[2], m,
		ii,jj, integ[0]*AUTODEB, integ[1]*AUTODEB, integ[2]*AUTODEB, m*AUTODEB
		);
	}
	return tmp;
}
/********************************************************************************/
/* OK-button callback of the transition-matrix dialog.
 * When the numerical method is selected, validates the box/grid entries and
 * commits them to the global grid settings; then computes <i|vec r|j> for
 * the selected orbital(s): the diagonal element when a single orbital is
 * selected, otherwise every alpha/alpha, beta/beta and alpha/beta pair.
 * The collected report is shown in a text window. */
static void apply_transition_matrix(GtkWidget *Win,gpointer data)
{
	GtkWidget** entriestmp = NULL;
	G_CONST_RETURN gchar* temp;
	gchar* dump;
	gint i;
	gint j;
	GridLimits limitstmp;
	gint NumPointstmp[3];
	GtkWidget *entries[3][6];
	gdouble V[3][3];
	GtkWidget* alphaList = g_object_get_data (G_OBJECT (Win), "AlphaList");
	GtkWidget* betaList = g_object_get_data (G_OBJECT (Win), "BetaList");
	GtkWidget* numericButton = g_object_get_data (G_OBJECT (Win), "NumericButton");
	gint* numAlphaOrbs = NULL;
	gint* numBetaOrbs = NULL;
	gint nAlpha = 0;
	gint nBeta = 0;
	gdouble integ[3], normi, normj, overlap;
	gchar* result = NULL;
	gboolean numeric = FALSE;
	if(GTK_IS_WIDGET(Win))
	{
		entriestmp = (GtkWidget **)g_object_get_data(G_OBJECT (Win), "Entries");
	}
	else return;
	if(entriestmp==NULL) return;
	if(!GTK_IS_WIDGET(numericButton)) return;
	numeric = GTK_TOGGLE_BUTTON (numericButton)->active;
	destroy_win_list();
	if(numeric)
	{
		for(i=0;i<3;i++)
		for(j=0;j<6;j++)
			entries[i][j] = entriestmp[i*6+j];
		/* columns 3..4 : min/max of the box along each direction */
		for(i=0;i<3;i++)
		{
			for(j=3;j<5;j++)
			{
				temp = gtk_entry_get_text(GTK_ENTRY(entries[i][j]));
				dump = NULL;
				if(temp && strlen(temp)>0)
				{
					dump = g_strdup(temp);
					delete_first_spaces(dump);
					delete_last_spaces(dump);
				}
				if(dump && strlen(dump)>0 && this_is_a_real(dump))
				{
					limitstmp.MinMax[j-3][i] = atof(dump);
				}
				else
				{
					GtkWidget* message = Message(_("Error : an entry is not a float "),_("Error"),TRUE);
					gtk_window_set_modal (GTK_WINDOW (message), TRUE);
					if(dump) g_free(dump); /* bug fix: dump was leaked on this error path */
					return;
				}
				if(dump) g_free(dump);
			}
			/* column 5 : number of grid points along this direction */
			temp = gtk_entry_get_text(GTK_ENTRY(entries[i][5]));
			NumPointstmp[i] = atoi(temp);
			if(NumPointstmp[i] <=2)
			{
				GtkWidget* message = Message(_("Error : The number of points should be > 2. "),_("Error"),TRUE);
				gtk_window_set_modal (GTK_WINDOW (message), TRUE);
				return;
			}
		}
		for(i=0;i<3;i++)
		{
			if( limitstmp.MinMax[0][i]> limitstmp.MinMax[1][i])
			{
				GtkWidget* message = Message(_("Error : The minimal value should be smaller than the maximal value "),_("Error"),TRUE);
				gtk_window_set_modal (GTK_WINDOW (message), TRUE);
				return;
			}
		}
		/* columns 0..2 : the three direction vectors of the grid */
		for(i=0;i<3;i++)
		{
			for(j=0;j<3;j++)
			{
				V[i][j] = 0;
				temp = gtk_entry_get_text(GTK_ENTRY(entries[i][j]));
				dump = NULL;
				if(temp && strlen(temp)>0)
				{
					dump = g_strdup(temp);
					delete_first_spaces(dump);
					delete_last_spaces(dump);
				}
				if(dump && strlen(dump)>0 && this_is_a_real(dump))
				{
					V[i][j] = atof(dump);
				}
				else
				{
					GtkWidget* message = Message(_("Error : an entry is not a float "),_("Error"),TRUE);
					gtk_window_set_modal (GTK_WINDOW (message), TRUE);
					if(dump) g_free(dump); /* bug fix: dump was leaked on this error path */
					return;
				}
				if(dump) g_free(dump);
			}
		}
		/* normalize the direction vectors; reject null vectors */
		for(i=0;i<3;i++)
		{
			gdouble norm = 0.0;
			for(j=0;j<3;j++)
				norm += V[i][j]*V[i][j];
			if(fabs(norm)<1e-8)
			{
				GtkWidget* message = Message(_("Error : the norm is equal to 0 "),_("Error"),TRUE);
				gtk_window_set_modal (GTK_WINDOW (message), TRUE);
				return;
			}
			for(j=0;j<3;j++)
				V[i][j] /= sqrt(norm);
		}
		/* commit the validated values to the global grid settings */
		for(j=0;j<3;j++) originOfCube[j] = 0;
		for(j=0;j<3;j++) firstDirection[j] = V[0][j];
		for(j=0;j<3;j++) secondDirection[j] = V[1][j];
		for(j=0;j<3;j++) thirdDirection[j] = V[2][j];
		for(i=0;i<3;i++)
		{
			NumPoints[i] =NumPointstmp[i] ;
			for(j=0;j<2;j++)
				limits.MinMax[j][i] =limitstmp.MinMax[j][i];
		}
	} /* end if numeric */
	CancelCalcul = FALSE;
	numAlphaOrbs = get_num_of_selected_orbitals(alphaList, &nAlpha);
	numBetaOrbs = get_num_of_selected_orbitals(betaList, &nBeta);
	if(nAlpha+nBeta<1)
	{
		/* NOTE(review): message text has a typo ("at last" for "at least");
		 * kept unchanged to avoid breaking existing translations */
		GtkWidget* message = Message(_("Error : You should select at last one orbital"),_("Error"),TRUE);
		gtk_window_set_modal (GTK_WINDOW (message), TRUE);
		/* bug fix: do not leak the selection arrays on this error path */
		if(numAlphaOrbs) g_free(numAlphaOrbs);
		if(numBetaOrbs) g_free(numBetaOrbs);
		return;
	}
	else if(nAlpha+nBeta==1)
	{
		/* single orbital selected: compute the diagonal element <i|vec r|i> */
		gint i = -1;
		gint typeOrb = -1;
		delete_child(Win);
		if(nAlpha==1 && numAlphaOrbs)
		{
			typeOrb = 1;
			i = numAlphaOrbs[0];
		}
		else if(nBeta==1 && numBetaOrbs)
		{
			typeOrb = 2;
			i = numBetaOrbs[0];
		}
		if(i>-1 && typeOrb>0)
		{
			result = compute_transition_matrix(
					NumPoints,limits,
					typeOrb, i, typeOrb, i,
					integ, &normi, &normj, &overlap, numeric);
		}
	}
	else
	{
		/* several orbitals: alpha/alpha, beta/beta then alpha/beta pairs */
		gint typeOrbi = 1;
		gint typeOrbj = 1;
		delete_child(Win);
		if(numAlphaOrbs)
		for(i=0;i<nAlpha;i++)
		for(j=i+1;j<nAlpha;j++)
		{
			gchar* tmp = NULL;
			gint ii = numAlphaOrbs[i];
			gint jj = numAlphaOrbs[j];
			if(CancelCalcul) break;
			tmp = compute_transition_matrix(
					NumPoints,limits,
					typeOrbi, ii, typeOrbj, jj,
					integ, &normi, &normj, &overlap, numeric);
			if(tmp)
			{
				gchar* old = result;
				if(old)
				{
					result = g_strdup_printf("%s%s",old,tmp);
					g_free(old);
				}
				else result = g_strdup_printf("%s",tmp);
				g_free(tmp); /* bug fix: tmp was leaked for every pair */
			}
		}
		typeOrbi = 2;
		typeOrbj = 2;
		if(numBetaOrbs)
		for(i=0;i<nBeta;i++)
		for(j=i+1;j<nBeta;j++)
		{
			gchar* tmp = NULL;
			gint ii = numBetaOrbs[i];
			gint jj = numBetaOrbs[j];
			if(CancelCalcul) break;
			tmp = compute_transition_matrix(
					NumPoints,limits,
					typeOrbi, ii, typeOrbj, jj,
					integ, &normi, &normj, &overlap, numeric);
			if(tmp)
			{
				gchar* old = result;
				if(old)
				{
					result = g_strdup_printf("%s%s",old,tmp);
					g_free(old);
				}
				else result = g_strdup_printf("%s",tmp);
				g_free(tmp); /* bug fix: tmp was leaked for every pair */
			}
		}
		typeOrbi = 1;
		typeOrbj = 2;
		if(numAlphaOrbs && numBetaOrbs)
		for(i=0;i<nAlpha;i++)
		for(j=0;j<nBeta;j++)
		{
			gchar* tmp = NULL;
			gint ii = numAlphaOrbs[i];
			gint jj = numBetaOrbs[j];
			if(CancelCalcul) break;
			tmp = compute_transition_matrix(
					NumPoints,limits,
					typeOrbi, ii, typeOrbj, jj,
					integ, &normi, &normj, &overlap, numeric);
			if(tmp)
			{
				gchar* old = result;
				if(old)
				{
					result = g_strdup_printf("%s%s",old,tmp);
					g_free(old);
				}
				else result = g_strdup_printf("%s",tmp);
				g_free(tmp); /* bug fix: tmp was leaked for every pair */
			}
		}
	}
	if(result && !CancelCalcul)
	{
		/* MessageTxt copies the text (see other callers in this file that
		 * free result right after), so result can be freed below */
		GtkWidget* message = MessageTxt(result,_("Result"));
		gtk_window_set_default_size (GTK_WINDOW(message),(gint)(ScreenWidth*0.8),-1);
		gtk_widget_set_size_request(message,(gint)(ScreenWidth*0.45),-1);
		gtk_window_set_transient_for(GTK_WINDOW(message),GTK_WINDOW(PrincipalWindow));
	}
	if(result) g_free(result); /* bug fix: the report string was leaked */
	set_label_title(NULL,0,0);
	if(numAlphaOrbs) g_free(numAlphaOrbs);
	if(numBetaOrbs) g_free(numBetaOrbs);
	if(CancelCalcul) CancelCalcul = FALSE;
}
/********************************************************************************/
/* Build and show the modal dialog used to compute transition matrix
 * elements <i|vec r|j> between selected molecular orbitals.
 * Requires geometry and molecular orbitals to be loaded already;
 * otherwise an error dialog is shown and no window is created.
 * The OK button triggers apply_transition_matrix(). */
void transition_matrix_orbitals_dlg()
{
	GtkWidget *Win;
	GtkWidget *frameGrid;
	GtkWidget *frameMethod;
	GtkWidget *alphaList;
	GtkWidget *betaList;
	GtkWidget *hbox;
	GtkWidget *vboxall;
	GtkWidget *vboxwin;
	GtkWidget *button;
	GtkWidget *label;
	GtkWidget** entries;
	GtkWidget* numericButton = NULL;
	GtkWidget* vbox = NULL;
	GtkWidget* table = NULL;
	/* precondition checks: geometry, MO coefficients and orbitals must be loaded */
	if(!GeomOrb)
	{
		Message(_("Sorry, Please load a file before\n"),_("Error"),TRUE);
		return;
	}
	if(!CoefAlphaOrbitals)
	{
		Message(_("Sorry, Please load the MO before\n"),_("Error"),TRUE);
		return;
	}
	if(!AOrb && !SAOrb)
	{
		Message(_("Sorry, Please load the MO before\n"),_("Error"),TRUE);
		return;
	}
	if(!AOAvailable &&(TypeGrid == GABEDIT_TYPEGRID_DDENSITY || TypeGrid == GABEDIT_TYPEGRID_ADENSITY))
	{
		Message(_("Sorry, No atomic orbitals available.\nPlease use a gabedit file for load : \n"
		"Geometry, Molecular and Atomic Orbitals\n"),_("Error"),TRUE);
		return;
	}
	/* modal top-level window, child of the main window */
	Win = gtk_window_new(GTK_WINDOW_TOPLEVEL);
	gtk_window_set_title(GTK_WINDOW(Win),"transition matrix element <i|vec r|j>");
	gtk_window_set_position(GTK_WINDOW(Win),GTK_WIN_POS_CENTER);
	gtk_container_set_border_width (GTK_CONTAINER (Win), 5);
	gtk_window_set_transient_for(GTK_WINDOW(Win),GTK_WINDOW(PrincipalWindow));
	gtk_window_set_modal (GTK_WINDOW (Win), TRUE);
	add_glarea_child(Win,"Grid ");
	vboxall = create_vbox(Win);
	vboxwin = vboxall;
	hbox = gtk_hbox_new (TRUE, 0);
	gtk_box_pack_start (GTK_BOX (vboxall), hbox, TRUE, TRUE, 0);
	label = gtk_label_new("");
	gtk_label_set_markup(GTK_LABEL(label), "<span foreground=\"#FF0000\"><big>Use mouse + the Ctrl key (or the shift key) to select several orbitals</big></span>\n");
	gtk_box_pack_start (GTK_BOX (hbox), label, TRUE, TRUE, 0);
	/* alpha/beta orbital lists; the selections are read back in apply_transition_matrix() */
	hbox = create_orbitals_list(vboxall);
	alphaList = g_object_get_data (G_OBJECT (hbox), "AlphaList");
	g_object_set_data (G_OBJECT (Win), "AlphaList",alphaList);
	betaList = g_object_get_data (G_OBJECT (hbox), "BetaList");
	g_object_set_data (G_OBJECT (Win), "BetaList",betaList);
	gtk_box_pack_start (GTK_BOX (vboxall), gtk_hseparator_new(), TRUE, TRUE, 5);
	/* method frame: analytic (default) or numerical integration on a grid */
	frameMethod = gtk_frame_new(_("Method"));
	gtk_box_pack_start (GTK_BOX (vboxall), frameMethod, TRUE, TRUE, 2);
	vbox = create_vbox(frameMethod);
	gtk_widget_show_all (vbox);
	table = gtk_table_new(2,2,FALSE);
	gtk_container_add(GTK_CONTAINER(vbox),table);
	gtk_widget_show (table);
	numericButton = gtk_check_button_new_with_label (
			_("Numerical computing (Large box is recommended)"));
	gtk_table_attach(GTK_TABLE(table),numericButton,0,0+2,0,0+1,
		  (GtkAttachOptions)(GTK_FILL | GTK_EXPAND),
		  (GtkAttachOptions)(GTK_FILL | GTK_SHRINK),
		  1,1);
	g_signal_connect(G_OBJECT(numericButton), "clicked",(GCallback)numeriButtonClicked,NULL);
	g_object_set_data (G_OBJECT (Win), "NumericButton",numericButton);
	/* grid frame is only sensitive when the numerical method is selected */
	frameGrid = create_grid_frame(vboxall,"Box & Grid");
	entries = (GtkWidget**) g_object_get_data (G_OBJECT (frameGrid), "Entries");
	g_object_set_data (G_OBJECT (Win), "Entries",entries);
	g_object_set_data (G_OBJECT (Win), "FrameGrid",frameGrid);
	g_object_set_data (G_OBJECT (numericButton), "FrameGrid",frameGrid);
	gtk_widget_set_sensitive(frameGrid, GTK_TOGGLE_BUTTON (numericButton)->active);
	/* no analytic orbitals available: force the numerical method */
	if(!AOrb && SAOrb)
	{
		gtk_button_clicked (GTK_BUTTON (numericButton));
		gtk_widget_set_sensitive(numericButton, FALSE);
	}
	/* OK / Cancel buttons */
	hbox = create_hbox_false(vboxwin);
	gtk_widget_realize(Win);
	button = create_button(Win,_("OK"));
	gtk_box_pack_end (GTK_BOX( hbox), button, FALSE, TRUE, 3);
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_widget_grab_default(button);
	gtk_widget_show (button);
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)apply_transition_matrix,G_OBJECT(Win));
	button = create_button(Win,_("Cancel"));
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_box_pack_end (GTK_BOX( hbox), button, FALSE, TRUE, 3);
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)delete_child, G_OBJECT(Win));
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)gtk_widget_destroy,G_OBJECT(Win));
	gtk_widget_show (button);
	gtk_widget_show_all (Win);
	/* preselect HOMO and LUMO (or the first two orbitals) */
	if(NAlphaOcc-1>=0)
	{
		select_row(alphaList,NAlphaOcc-1);
		if(NAlphaOcc+1<=NOrb) select_row(alphaList,NAlphaOcc);
	}
	else
	{
		select_row(alphaList,0);
		if(2<=NOrb) select_row(alphaList,1);
	}
}
/********************************************************************************/
gchar* compute_spatial_overlapiijj(gint N[],GridLimits limits, gint typeOrbi, gint ii, gint typeOrbj, gint jj,
gdouble* integ, gdouble* pNormi, gdouble* pNormj, gdouble* pOverlap, gboolean numeric, gdouble schwarzCutOff)
{
gchar* tmp = NULL;
if(numeric)
{
if(!compute_spatial_overlapiijj_numeric(N, limits, typeOrbi, ii, typeOrbj, jj,
integ, pNormi, pNormj, pOverlap)) return tmp;
if(CancelCalcul) return tmp;
ii++;
jj++;
tmp = g_strdup_printf(
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d %d|delta(ri,rj)|%d %d> = %0.12lf\n",
ii,ii,*pNormi,
jj,jj,*pNormj,
ii,jj,*pOverlap,
ii,ii,jj,jj, *integ
);
}
else if(!numeric)
{
setTextInProgress(_("Analytic computing of spatial overlap <ii|delta(ri,rj)|jj> integral"));
*integ = compute_spatial_overlap_analytic(typeOrbi, ii, typeOrbj, jj,schwarzCutOff);
if(CancelCalcul) return tmp;
*pNormi = get_overlap_analytic(typeOrbi, ii, typeOrbi, ii);
*pNormj = get_overlap_analytic(typeOrbj, jj, typeOrbj, jj);
*pOverlap = get_overlap_analytic(typeOrbi, ii, typeOrbj, jj);
ii++;
jj++;
tmp = g_strdup_printf(
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d|%d> = %lf\n"
"<%d %d|delta(ri,rj)|%d %d> = %0.12lf\n",
ii,ii,*pNormi,
jj,jj,*pNormj,
ii,jj,*pOverlap,
ii,ii,jj,jj, *integ
);
}
return tmp;
}
/********************************************************************************/
/* OK-button callback of the <ii|delta(ri,rj)|jj> spatial-overlap dialog.
 * Reads the Schwarz cutoff; when the numerical method is selected, validates
 * the box/grid entries and commits them to the global grid settings; then
 * computes the spatial overlap for the selected orbital(s): the diagonal
 * element for a single selection, otherwise every alpha/alpha, beta/beta
 * and alpha/beta pair. The collected report is shown in a text window. */
static void apply_spatial_overlapiijj(GtkWidget *Win,gpointer data)
{
	GtkWidget** entriestmp = NULL;
	G_CONST_RETURN gchar* temp;
	gchar* dump;
	gint i;
	gint j;
	GridLimits limitstmp;
	gint NumPointstmp[3];
	GtkWidget *entries[3][6];
	gdouble V[3][3];
	GtkWidget* alphaList = NULL;
	GtkWidget* betaList = NULL;
	GtkWidget* numericButton = NULL;
	GtkWidget* entrySchwarz = NULL;
	gint* numAlphaOrbs = NULL;
	gint* numBetaOrbs = NULL;
	gint nAlpha = 0;
	gint nBeta = 0;
	gdouble integ[3], normi, normj, overlap;
	gchar* result = NULL;
	gboolean numeric = FALSE;
	gdouble schwarzCutOff;
	if(GTK_IS_WIDGET(Win))
	{
		entriestmp = (GtkWidget **)g_object_get_data(G_OBJECT (Win), "Entries");
		alphaList = g_object_get_data (G_OBJECT (Win), "AlphaList");
		betaList = g_object_get_data (G_OBJECT (Win), "BetaList");
		numericButton = g_object_get_data (G_OBJECT (Win), "NumericButton");
		entrySchwarz = g_object_get_data (G_OBJECT (Win), "EntrySchwarz");
	}
	else return;
	if(entriestmp==NULL) return;
	if(!GTK_IS_WIDGET(numericButton)) return;
	if(!GTK_IS_WIDGET(entrySchwarz)) return;
	/* Schwarz screening threshold used by the analytic integrals */
	temp = gtk_entry_get_text(GTK_ENTRY(entrySchwarz));
	schwarzCutOff = atof(temp);
	numeric = GTK_TOGGLE_BUTTON (numericButton)->active;
	destroy_win_list();
	if(numeric)
	{
		for(i=0;i<3;i++)
		for(j=0;j<6;j++)
			entries[i][j] = entriestmp[i*6+j];
		/* columns 3..4 : min/max of the box along each direction */
		for(i=0;i<3;i++)
		{
			for(j=3;j<5;j++)
			{
				temp = gtk_entry_get_text(GTK_ENTRY(entries[i][j]));
				dump = NULL;
				if(temp && strlen(temp)>0)
				{
					dump = g_strdup(temp);
					delete_first_spaces(dump);
					delete_last_spaces(dump);
				}
				if(dump && strlen(dump)>0 && this_is_a_real(dump))
				{
					limitstmp.MinMax[j-3][i] = atof(dump);
				}
				else
				{
					GtkWidget* message = Message(_("Error : an entry is not a float "),_("Error"),TRUE);
					gtk_window_set_modal (GTK_WINDOW (message), TRUE);
					if(dump) g_free(dump); /* bug fix: dump was leaked on this error path */
					return;
				}
				if(dump) g_free(dump);
			}
			/* column 5 : number of grid points along this direction */
			temp = gtk_entry_get_text(GTK_ENTRY(entries[i][5]));
			NumPointstmp[i] = atoi(temp);
			if(NumPointstmp[i] <=2)
			{
				GtkWidget* message = Message(_("Error : The number of points should be > 2. "),_("Error"),TRUE);
				gtk_window_set_modal (GTK_WINDOW (message), TRUE);
				return;
			}
		}
		for(i=0;i<3;i++)
		{
			if( limitstmp.MinMax[0][i]> limitstmp.MinMax[1][i])
			{
				GtkWidget* message = Message(_("Error : The minimal value should be smaller than the maximal value "),_("Error"),TRUE);
				gtk_window_set_modal (GTK_WINDOW (message), TRUE);
				return;
			}
		}
		/* columns 0..2 : the three direction vectors of the grid */
		for(i=0;i<3;i++)
		{
			for(j=0;j<3;j++)
			{
				V[i][j] = 0;
				temp = gtk_entry_get_text(GTK_ENTRY(entries[i][j]));
				dump = NULL;
				if(temp && strlen(temp)>0)
				{
					dump = g_strdup(temp);
					delete_first_spaces(dump);
					delete_last_spaces(dump);
				}
				if(dump && strlen(dump)>0 && this_is_a_real(dump))
				{
					V[i][j] = atof(dump);
				}
				else
				{
					GtkWidget* message = Message(_("Error : an entry is not a float "),_("Error"),TRUE);
					gtk_window_set_modal (GTK_WINDOW (message), TRUE);
					if(dump) g_free(dump); /* bug fix: dump was leaked on this error path */
					return;
				}
				if(dump) g_free(dump);
			}
		}
		/* normalize the direction vectors; reject null vectors */
		for(i=0;i<3;i++)
		{
			gdouble norm = 0.0;
			for(j=0;j<3;j++)
				norm += V[i][j]*V[i][j];
			if(fabs(norm)<1e-8)
			{
				GtkWidget* message = Message(_("Error : the norm is equal to 0 "),_("Error"),TRUE);
				gtk_window_set_modal (GTK_WINDOW (message), TRUE);
				return;
			}
			for(j=0;j<3;j++)
				V[i][j] /= sqrt(norm);
		}
		/* commit the validated values to the global grid settings */
		for(j=0;j<3;j++) originOfCube[j] = 0;
		for(j=0;j<3;j++) firstDirection[j] = V[0][j];
		for(j=0;j<3;j++) secondDirection[j] = V[1][j];
		for(j=0;j<3;j++) thirdDirection[j] = V[2][j];
		for(i=0;i<3;i++)
		{
			NumPoints[i] =NumPointstmp[i] ;
			for(j=0;j<2;j++)
				limits.MinMax[j][i] =limitstmp.MinMax[j][i];
		}
	} /* end if numeric */
	CancelCalcul = FALSE;
	numAlphaOrbs = get_num_of_selected_orbitals(alphaList, &nAlpha);
	numBetaOrbs = get_num_of_selected_orbitals(betaList, &nBeta);
	if(nAlpha+nBeta<1)
	{
		/* NOTE(review): message text has a typo ("at last" for "at least");
		 * kept unchanged to avoid breaking existing translations */
		GtkWidget* message = Message(_("Error : You should select at last one orbital"),_("Error"),TRUE);
		gtk_window_set_modal (GTK_WINDOW (message), TRUE);
		/* bug fix: do not leak the selection arrays on this error path */
		if(numAlphaOrbs) g_free(numAlphaOrbs);
		if(numBetaOrbs) g_free(numBetaOrbs);
		return;
	}
	else if(nAlpha+nBeta==1)
	{
		/* single orbital selected: compute the diagonal element */
		gint i = -1;
		gint typeOrb = -1;
		delete_child(Win);
		if(nAlpha==1 && numAlphaOrbs)
		{
			typeOrb = 1;
			i = numAlphaOrbs[0];
		}
		else if(nBeta==1 && numBetaOrbs)
		{
			typeOrb = 2;
			i = numBetaOrbs[0];
		}
		if(i>-1 && typeOrb>0)
		{
			result = compute_spatial_overlapiijj(
					NumPoints,limits,
					typeOrb, i, typeOrb, i,
					integ, &normi, &normj, &overlap, numeric, schwarzCutOff);
		}
	}
	else
	{
		/* several orbitals: alpha/alpha, beta/beta then alpha/beta pairs */
		gint typeOrbi = 1;
		gint typeOrbj = 1;
		delete_child(Win);
		if(numAlphaOrbs)
		for(i=0;i<nAlpha;i++)
		for(j=i+1;j<nAlpha;j++)
		{
			gchar* tmp = NULL;
			gint ii = numAlphaOrbs[i];
			gint jj = numAlphaOrbs[j];
			if(CancelCalcul) break;
			tmp = compute_spatial_overlapiijj(
					NumPoints,limits,
					typeOrbi, ii, typeOrbj, jj,
					integ, &normi, &normj, &overlap, numeric, schwarzCutOff);
			if(tmp)
			{
				gchar* old = result;
				if(old)
				{
					result = g_strdup_printf("%s%s",old,tmp);
					g_free(old);
				}
				else result = g_strdup_printf("%s",tmp);
				g_free(tmp); /* bug fix: tmp was leaked for every pair */
			}
		}
		typeOrbi = 2;
		typeOrbj = 2;
		if(numBetaOrbs)
		for(i=0;i<nBeta;i++)
		for(j=i+1;j<nBeta;j++)
		{
			gchar* tmp = NULL;
			gint ii = numBetaOrbs[i];
			gint jj = numBetaOrbs[j];
			if(CancelCalcul) break;
			tmp = compute_spatial_overlapiijj(
					NumPoints,limits,
					typeOrbi, ii, typeOrbj, jj,
					integ, &normi, &normj, &overlap, numeric,schwarzCutOff);
			if(tmp)
			{
				gchar* old = result;
				if(old)
				{
					result = g_strdup_printf("%s%s",old,tmp);
					g_free(old);
				}
				else result = g_strdup_printf("%s",tmp);
				g_free(tmp); /* bug fix: tmp was leaked for every pair */
			}
		}
		typeOrbi = 1;
		typeOrbj = 2;
		if(numAlphaOrbs && numBetaOrbs)
		for(i=0;i<nAlpha;i++)
		for(j=0;j<nBeta;j++)
		{
			gchar* tmp = NULL;
			gint ii = numAlphaOrbs[i];
			gint jj = numBetaOrbs[j];
			if(CancelCalcul) break;
			tmp = compute_spatial_overlapiijj(
					NumPoints,limits,
					typeOrbi, ii, typeOrbj, jj,
					integ, &normi, &normj, &overlap, numeric,schwarzCutOff);
			if(tmp)
			{
				gchar* old = result;
				if(old)
				{
					result = g_strdup_printf("%s%s",old,tmp);
					g_free(old);
				}
				else result = g_strdup_printf("%s",tmp);
				g_free(tmp); /* bug fix: tmp was leaked for every pair */
			}
		}
	}
	if(result && !CancelCalcul)
	{
		/* MessageTxt copies the text (see other callers in this file that
		 * free result right after), so result can be freed below */
		GtkWidget* message = MessageTxt(result,_("Result"));
		gtk_window_set_default_size (GTK_WINDOW(message),(gint)(ScreenWidth*0.8),-1);
		gtk_widget_set_size_request(message,(gint)(ScreenWidth*0.45),-1);
		gtk_window_set_transient_for(GTK_WINDOW(message),GTK_WINDOW(PrincipalWindow));
	}
	if(result) g_free(result); /* bug fix: the report string was leaked */
	set_label_title(NULL,0,0);
	if(numAlphaOrbs) g_free(numAlphaOrbs);
	if(numBetaOrbs) g_free(numBetaOrbs);
	if(CancelCalcul) CancelCalcul = FALSE;
}
/********************************************************************************/
/* Build and show the modal dialog used to compute spatial overlap
 * elements <ii|delta(ri,rj)|jj> between selected molecular orbitals.
 * Requires geometry and molecular orbitals to be loaded already;
 * otherwise an error dialog is shown and no window is created.
 * The OK button triggers apply_spatial_overlapiijj(). */
void spatial_overlapiijj_orbitals_dlg()
{
	GtkWidget *Win;
	GtkWidget *frameGrid;
	GtkWidget *frameMethod;
	GtkWidget *alphaList;
	GtkWidget *betaList;
	GtkWidget *hbox;
	GtkWidget *vboxall;
	GtkWidget *vboxwin;
	GtkWidget *button;
	GtkWidget *label;
	GtkWidget** entries;
	GtkWidget* numericButton = NULL;
	GtkWidget* vbox = NULL;
	GtkWidget* table = NULL;
	GtkWidget* entrySchwarz = NULL;
	/* precondition checks: geometry, MO coefficients and orbitals must be loaded */
	if(!GeomOrb)
	{
		Message(_("Sorry, Please load a file before\n"),_("Error"),TRUE);
		return;
	}
	if(!CoefAlphaOrbitals)
	{
		Message(_("Sorry, Please load the MO before\n"),_("Error"),TRUE);
		return;
	}
	if(!AOrb && !SAOrb)
	{
		Message(_("Sorry, Please load the MO before\n"),_("Error"),TRUE);
		return;
	}
	if(!AOAvailable &&(TypeGrid == GABEDIT_TYPEGRID_DDENSITY || TypeGrid == GABEDIT_TYPEGRID_ADENSITY))
	{
		Message(_("Sorry, No atomic orbitals available.\nPlease use a gabedit file for load : \n"
		"Geometry, Molecular and Atomic Orbitals\n"),_("Error"),TRUE);
		return;
	}
	/* modal top-level window, child of the main window */
	Win = gtk_window_new(GTK_WINDOW_TOPLEVEL);
	gtk_window_set_title(GTK_WINDOW(Win),"Spatial Overlap element <ii|delta(ri,rj)|jj>");
	gtk_window_set_position(GTK_WINDOW(Win),GTK_WIN_POS_CENTER);
	gtk_container_set_border_width (GTK_CONTAINER (Win), 5);
	gtk_window_set_transient_for(GTK_WINDOW(Win),GTK_WINDOW(PrincipalWindow));
	gtk_window_set_modal (GTK_WINDOW (Win), TRUE);
	add_glarea_child(Win,"Grid ");
	vboxall = create_vbox(Win);
	vboxwin = vboxall;
	hbox = gtk_hbox_new (TRUE, 0);
	gtk_box_pack_start (GTK_BOX (vboxall), hbox, TRUE, TRUE, 0);
	label = gtk_label_new("");
	gtk_label_set_markup(GTK_LABEL(label), "<span foreground=\"#FF0000\"><big>Use mouse + the Ctrl key (or the shift key) to select several orbitals</big></span>\n");
	gtk_box_pack_start (GTK_BOX (hbox), label, TRUE, TRUE, 0);
	/* alpha/beta orbital lists; the selections are read back in apply_spatial_overlapiijj() */
	hbox = create_orbitals_list(vboxall);
	alphaList = g_object_get_data (G_OBJECT (hbox), "AlphaList");
	g_object_set_data (G_OBJECT (Win), "AlphaList",alphaList);
	betaList = g_object_get_data (G_OBJECT (hbox), "BetaList");
	g_object_set_data (G_OBJECT (Win), "BetaList",betaList);
	gtk_box_pack_start (GTK_BOX (vboxall), gtk_hseparator_new(), TRUE, TRUE, 5);
	/* method frame: analytic (default) or numerical integration on a grid */
	frameMethod = gtk_frame_new(_("Method"));
	gtk_box_pack_start (GTK_BOX (vboxall), frameMethod, TRUE, TRUE, 2);
	vbox = create_vbox(frameMethod);
	gtk_widget_show_all (vbox);
	table = gtk_table_new(2,2,FALSE);
	gtk_container_add(GTK_CONTAINER(vbox),table);
	gtk_widget_show (table);
	numericButton = gtk_check_button_new_with_label (
			_("Numerical computing (Large box is recommended)"));
	gtk_table_attach(GTK_TABLE(table),numericButton,0,0+2,0,0+1,
		  (GtkAttachOptions)(GTK_FILL | GTK_EXPAND),
		  (GtkAttachOptions)(GTK_FILL | GTK_SHRINK),
		  1,1);
	g_signal_connect(G_OBJECT(numericButton), "clicked",(GCallback)numeriButtonClicked,NULL);
	g_object_set_data (G_OBJECT (Win), "NumericButton",numericButton);
	/* Schwarz cutoff used by the analytic method */
	label = gtk_label_new(_(" Schwarz cutoff : "));
	gtk_table_attach(GTK_TABLE(table),label,0,0+1,1,1+1,
		  (GtkAttachOptions)(GTK_FILL | GTK_SHRINK),
		  (GtkAttachOptions)(GTK_FILL | GTK_SHRINK),
		  1,1);
	g_object_set_data (G_OBJECT (Win), "LabelSchwarz",label);
	g_object_set_data (G_OBJECT (numericButton), "LabelSchwarz",label);
	entrySchwarz = gtk_entry_new();
	gtk_entry_set_text(GTK_ENTRY(entrySchwarz),"1e-8");
	gtk_table_attach(GTK_TABLE(table),entrySchwarz,1,1+1,1,1+1,
		  (GtkAttachOptions)(GTK_FILL | GTK_EXPAND),
		  (GtkAttachOptions)(GTK_FILL | GTK_SHRINK),
		  1,1);
	g_object_set_data (G_OBJECT (Win), "EntrySchwarz",entrySchwarz);
	g_object_set_data (G_OBJECT (numericButton), "EntrySchwarz",entrySchwarz);
	/* grid frame is only sensitive when the numerical method is selected */
	frameGrid = create_grid_frame(vboxall,"Box & Grid");
	entries = (GtkWidget**) g_object_get_data (G_OBJECT (frameGrid), "Entries");
	g_object_set_data (G_OBJECT (Win), "Entries",entries);
	g_object_set_data (G_OBJECT (Win), "FrameGrid",frameGrid);
	g_object_set_data (G_OBJECT (numericButton), "FrameGrid",frameGrid);
	gtk_widget_set_sensitive(frameGrid, GTK_TOGGLE_BUTTON (numericButton)->active);
	/* no analytic orbitals available: force the numerical method */
	if(!AOrb && SAOrb)
	{
		gtk_button_clicked (GTK_BUTTON (numericButton));
		gtk_widget_set_sensitive(numericButton, FALSE);
	}
	/* OK / Cancel buttons */
	hbox = create_hbox_false(vboxwin);
	gtk_widget_realize(Win);
	button = create_button(Win,_("OK"));
	gtk_box_pack_end (GTK_BOX( hbox), button, FALSE, TRUE, 3);
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_widget_grab_default(button);
	gtk_widget_show (button);
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)apply_spatial_overlapiijj,G_OBJECT(Win));
	button = create_button(Win,_("Cancel"));
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_box_pack_end (GTK_BOX( hbox), button, FALSE, TRUE, 3);
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)delete_child, G_OBJECT(Win));
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)gtk_widget_destroy,G_OBJECT(Win));
	gtk_widget_show (button);
	gtk_widget_show_all (Win);
	/* preselect HOMO and LUMO (or the first two orbitals) */
	if(NAlphaOcc-1>=0)
	{
		select_row(alphaList,NAlphaOcc-1);
		if(NAlphaOcc+1<=NOrb) select_row(alphaList,NAlphaOcc);
	}
	else
	{
		select_row(alphaList,0);
		if(2<=NOrb) select_row(alphaList,1);
	}
}
/********************************************************************************/
/* Numerically evaluate the spatial overlap < |i| | |j| > between two
 * orbitals on the grid described by N/limits, storing the norms, overlap
 * and integral through the output pointers.
 * Returns a newly allocated report string (caller frees), or NULL when the
 * integration failed or the user cancelled the calculation. */
gchar* compute_spatial_overlapij(gint N[],GridLimits limits, gint typeOrbi, gint ii, gint typeOrbj, gint jj,
		gdouble* integ, gdouble* pNormi, gdouble* pNormj, gdouble* pOverlap)
{
	gboolean done;
	done = compute_spatial_overlapij_numeric(N, limits, typeOrbi, ii, typeOrbj, jj,
			integ, pNormi, pNormj, pOverlap);
	if(!done) return NULL;
	if(CancelCalcul) return NULL;
	/* report orbital numbers 1-based */
	return g_strdup_printf(
		"<%d|%d> = %lf\n"
		"<%d|%d> = %lf\n"
		"<%d|%d> = %lf\n"
		"< |%d| | |%d|> = %0.12lf\n",
		ii+1,ii+1,*pNormi,
		jj+1,jj+1,*pNormj,
		ii+1,jj+1,*pOverlap,
		ii+1,jj+1, *integ
		);
}
/********************************************************************************/
/* OK-button callback of the < |i| | |j| > spatial-overlap dialog.
 * This computation is always numerical: validates the box/grid entries,
 * commits them to the global grid settings, then computes the overlap for
 * the selected orbital(s): the diagonal element for a single selection,
 * otherwise every alpha/alpha, beta/beta and alpha/beta pair.
 * The collected report is shown in a text window. */
static void apply_spatial_overlapij(GtkWidget *Win,gpointer data)
{
	GtkWidget** entriestmp = NULL;
	G_CONST_RETURN gchar* temp;
	gchar* dump;
	gint i;
	gint j;
	GridLimits limitstmp;
	gint NumPointstmp[3];
	GtkWidget *entries[3][6];
	gdouble V[3][3];
	GtkWidget* alphaList = NULL;
	GtkWidget* betaList = NULL;
	gint* numAlphaOrbs = NULL;
	gint* numBetaOrbs = NULL;
	gint nAlpha = 0;
	gint nBeta = 0;
	gdouble integ[3], normi, normj, overlap;
	gchar* result = NULL;
	if(GTK_IS_WIDGET(Win))
	{
		entriestmp = (GtkWidget **)g_object_get_data(G_OBJECT (Win), "Entries");
		alphaList = g_object_get_data (G_OBJECT (Win), "AlphaList");
		betaList = g_object_get_data (G_OBJECT (Win), "BetaList");
	}
	else return;
	if(entriestmp==NULL) return;
	destroy_win_list();
	for(i=0;i<3;i++)
	for(j=0;j<6;j++)
		entries[i][j] = entriestmp[i*6+j];
	/* columns 3..4 : min/max of the box along each direction */
	for(i=0;i<3;i++)
	{
		for(j=3;j<5;j++)
		{
			temp = gtk_entry_get_text(GTK_ENTRY(entries[i][j]));
			dump = NULL;
			if(temp && strlen(temp)>0)
			{
				dump = g_strdup(temp);
				delete_first_spaces(dump);
				delete_last_spaces(dump);
			}
			if(dump && strlen(dump)>0 && this_is_a_real(dump))
			{
				limitstmp.MinMax[j-3][i] = atof(dump);
			}
			else
			{
				GtkWidget* message = Message(_("Error : an entry is not a float "),_("Error"),TRUE);
				gtk_window_set_modal (GTK_WINDOW (message), TRUE);
				if(dump) g_free(dump); /* bug fix: dump was leaked on this error path */
				return;
			}
			if(dump) g_free(dump);
		}
		/* column 5 : number of grid points along this direction */
		temp = gtk_entry_get_text(GTK_ENTRY(entries[i][5]));
		NumPointstmp[i] = atoi(temp);
		if(NumPointstmp[i] <=2)
		{
			GtkWidget* message = Message(_("Error : The number of points should be > 2. "),_("Error"),TRUE);
			gtk_window_set_modal (GTK_WINDOW (message), TRUE);
			return;
		}
	}
	for(i=0;i<3;i++)
	{
		if( limitstmp.MinMax[0][i]> limitstmp.MinMax[1][i])
		{
			GtkWidget* message = Message(_("Error : The minimal value should be smaller than the maximal value "),_("Error"),TRUE);
			gtk_window_set_modal (GTK_WINDOW (message), TRUE);
			return;
		}
	}
	/* columns 0..2 : the three direction vectors of the grid */
	for(i=0;i<3;i++)
	{
		for(j=0;j<3;j++)
		{
			V[i][j] = 0;
			temp = gtk_entry_get_text(GTK_ENTRY(entries[i][j]));
			dump = NULL;
			if(temp && strlen(temp)>0)
			{
				dump = g_strdup(temp);
				delete_first_spaces(dump);
				delete_last_spaces(dump);
			}
			if(dump && strlen(dump)>0 && this_is_a_real(dump))
			{
				V[i][j] = atof(dump);
			}
			else
			{
				GtkWidget* message = Message(_("Error : an entry is not a float "),_("Error"),TRUE);
				gtk_window_set_modal (GTK_WINDOW (message), TRUE);
				if(dump) g_free(dump); /* bug fix: dump was leaked on this error path */
				return;
			}
			if(dump) g_free(dump);
		}
	}
	/* normalize the direction vectors; reject null vectors */
	for(i=0;i<3;i++)
	{
		gdouble norm = 0.0;
		for(j=0;j<3;j++)
			norm += V[i][j]*V[i][j];
		if(fabs(norm)<1e-8)
		{
			GtkWidget* message = Message(_("Error : the norm is equal to 0 "),_("Error"),TRUE);
			gtk_window_set_modal (GTK_WINDOW (message), TRUE);
			return;
		}
		for(j=0;j<3;j++)
			V[i][j] /= sqrt(norm);
	}
	/* commit the validated values to the global grid settings */
	for(j=0;j<3;j++) originOfCube[j] = 0;
	for(j=0;j<3;j++) firstDirection[j] = V[0][j];
	for(j=0;j<3;j++) secondDirection[j] = V[1][j];
	for(j=0;j<3;j++) thirdDirection[j] = V[2][j];
	for(i=0;i<3;i++)
	{
		NumPoints[i] =NumPointstmp[i] ;
		for(j=0;j<2;j++)
			limits.MinMax[j][i] =limitstmp.MinMax[j][i];
	}
	CancelCalcul = FALSE;
	numAlphaOrbs = get_num_of_selected_orbitals(alphaList, &nAlpha);
	numBetaOrbs = get_num_of_selected_orbitals(betaList, &nBeta);
	if(nAlpha+nBeta<1)
	{
		/* NOTE(review): message text has a typo ("at last" for "at least");
		 * kept unchanged to avoid breaking existing translations */
		GtkWidget* message = Message(_("Error : You should select at last one orbital"),_("Error"),TRUE);
		gtk_window_set_modal (GTK_WINDOW (message), TRUE);
		/* bug fix: do not leak the selection arrays on this error path */
		if(numAlphaOrbs) g_free(numAlphaOrbs);
		if(numBetaOrbs) g_free(numBetaOrbs);
		return;
	}
	else if(nAlpha+nBeta==1)
	{
		/* single orbital selected: compute the diagonal element */
		gint i = -1;
		gint typeOrb = -1;
		delete_child(Win);
		if(nAlpha==1 && numAlphaOrbs)
		{
			typeOrb = 1;
			i = numAlphaOrbs[0];
		}
		else if(nBeta==1 && numBetaOrbs)
		{
			typeOrb = 2;
			i = numBetaOrbs[0];
		}
		if(i>-1 && typeOrb>0)
		{
			result = compute_spatial_overlapij( NumPoints,limits, typeOrb, i, typeOrb, i, integ, &normi, &normj, &overlap);
		}
	}
	else
	{
		/* several orbitals: alpha/alpha, beta/beta then alpha/beta pairs */
		gint typeOrbi = 1;
		gint typeOrbj = 1;
		delete_child(Win);
		if(numAlphaOrbs)
		for(i=0;i<nAlpha;i++)
		for(j=i+1;j<nAlpha;j++)
		{
			gchar* tmp = NULL;
			gint ii = numAlphaOrbs[i];
			gint jj = numAlphaOrbs[j];
			if(CancelCalcul) break;
			tmp = compute_spatial_overlapij( NumPoints,limits, typeOrbi, ii, typeOrbj, jj, integ, &normi, &normj, &overlap);
			if(tmp)
			{
				gchar* old = result;
				if(old)
				{
					result = g_strdup_printf("%s%s",old,tmp);
					g_free(old);
				}
				else result = g_strdup_printf("%s",tmp);
				g_free(tmp); /* bug fix: tmp was leaked for every pair */
			}
		}
		typeOrbi = 2;
		typeOrbj = 2;
		if(numBetaOrbs)
		for(i=0;i<nBeta;i++)
		for(j=i+1;j<nBeta;j++)
		{
			gchar* tmp = NULL;
			gint ii = numBetaOrbs[i];
			gint jj = numBetaOrbs[j];
			if(CancelCalcul) break;
			tmp = compute_spatial_overlapij( NumPoints,limits, typeOrbi, ii, typeOrbj, jj, integ, &normi, &normj, &overlap);
			if(tmp)
			{
				gchar* old = result;
				if(old)
				{
					result = g_strdup_printf("%s%s",old,tmp);
					g_free(old);
				}
				else result = g_strdup_printf("%s",tmp);
				g_free(tmp); /* bug fix: tmp was leaked for every pair */
			}
		}
		typeOrbi = 1;
		typeOrbj = 2;
		if(numAlphaOrbs && numBetaOrbs)
		for(i=0;i<nAlpha;i++)
		for(j=0;j<nBeta;j++)
		{
			gchar* tmp = NULL;
			gint ii = numAlphaOrbs[i];
			gint jj = numBetaOrbs[j];
			if(CancelCalcul) break;
			tmp = compute_spatial_overlapij( NumPoints,limits, typeOrbi, ii, typeOrbj, jj, integ, &normi, &normj, &overlap);
			if(tmp)
			{
				gchar* old = result;
				if(old)
				{
					result = g_strdup_printf("%s%s",old,tmp);
					g_free(old);
				}
				else result = g_strdup_printf("%s",tmp);
				g_free(tmp); /* bug fix: tmp was leaked for every pair */
			}
		}
	}
	if(result && !CancelCalcul)
	{
		/* MessageTxt copies the text (see other callers in this file that
		 * free result right after), so result can be freed below */
		GtkWidget* message = MessageTxt(result,_("Result"));
		gtk_window_set_default_size (GTK_WINDOW(message),(gint)(ScreenWidth*0.8),-1);
		gtk_widget_set_size_request(message,(gint)(ScreenWidth*0.45),-1);
		gtk_window_set_transient_for(GTK_WINDOW(message),GTK_WINDOW(PrincipalWindow));
	}
	if(result) g_free(result); /* bug fix: the report string was leaked */
	set_label_title(NULL,0,0);
	if(numAlphaOrbs) g_free(numAlphaOrbs);
	if(numBetaOrbs) g_free(numBetaOrbs);
	if(CancelCalcul) CancelCalcul = FALSE;
}
/********************************************************************************/
/* Build and show the modal dialog used to compute spatial overlap
 * elements < |i| | |j| > between selected molecular orbitals.
 * This computation is numerical only, so the grid frame is always active.
 * Requires geometry and molecular orbitals to be loaded already.
 * The OK button triggers apply_spatial_overlapij(). */
void spatial_overlapij_orbitals_dlg()
{
	GtkWidget *Win;
	GtkWidget *frameGrid;
	GtkWidget *alphaList;
	GtkWidget *betaList;
	GtkWidget *hbox;
	GtkWidget *vboxall;
	GtkWidget *vboxwin;
	GtkWidget *button;
	GtkWidget *label;
	GtkWidget** entries;
	/* precondition checks: geometry, MO coefficients and orbitals must be loaded */
	if(!GeomOrb)
	{
		Message(_("Sorry, Please load a file before\n"),_("Error"),TRUE);
		return;
	}
	if(!CoefAlphaOrbitals)
	{
		Message(_("Sorry, Please load the MO before\n"),_("Error"),TRUE);
		return;
	}
	if(!AOrb && !SAOrb)
	{
		Message(_("Sorry, Please load the MO before\n"),_("Error"),TRUE);
		return;
	}
	if(!AOAvailable &&(TypeGrid == GABEDIT_TYPEGRID_DDENSITY || TypeGrid == GABEDIT_TYPEGRID_ADENSITY))
	{
		Message(_("Sorry, No atomic orbitals available.\nPlease use a gabedit file for load : \n"
		"Geometry, Molecular and Atomic Orbitals\n"),_("Error"),TRUE);
		return;
	}
	/* modal top-level window, child of the main window */
	Win = gtk_window_new(GTK_WINDOW_TOPLEVEL);
	gtk_window_set_title(GTK_WINDOW(Win),"Spatial Overlap element < |i| | |j|>");
	gtk_window_set_position(GTK_WINDOW(Win),GTK_WIN_POS_CENTER);
	gtk_container_set_border_width (GTK_CONTAINER (Win), 5);
	gtk_window_set_transient_for(GTK_WINDOW(Win),GTK_WINDOW(PrincipalWindow));
	gtk_window_set_modal (GTK_WINDOW (Win), TRUE);
	add_glarea_child(Win,"Grid ");
	vboxall = create_vbox(Win);
	vboxwin = vboxall;
	hbox = gtk_hbox_new (TRUE, 0);
	gtk_box_pack_start (GTK_BOX (vboxall), hbox, TRUE, TRUE, 0);
	label = gtk_label_new("");
	gtk_label_set_markup(GTK_LABEL(label), "<span foreground=\"#FF0000\"><big>Use mouse + the Ctrl key (or the shift key) to select several orbitals</big></span>\n");
	gtk_box_pack_start (GTK_BOX (hbox), label, TRUE, TRUE, 0);
	/* alpha/beta orbital lists; the selections are read back in apply_spatial_overlapij() */
	hbox = create_orbitals_list(vboxall);
	alphaList = g_object_get_data (G_OBJECT (hbox), "AlphaList");
	g_object_set_data (G_OBJECT (Win), "AlphaList",alphaList);
	betaList = g_object_get_data (G_OBJECT (hbox), "BetaList");
	g_object_set_data (G_OBJECT (Win), "BetaList",betaList);
	gtk_box_pack_start (GTK_BOX (vboxall), gtk_hseparator_new(), TRUE, TRUE, 5);
	/* grid frame, always sensitive (numerical method only) */
	frameGrid = create_grid_frame(vboxall,"Box & Grid");
	entries = (GtkWidget**) g_object_get_data (G_OBJECT (frameGrid), "Entries");
	g_object_set_data (G_OBJECT (Win), "Entries",entries);
	g_object_set_data (G_OBJECT (Win), "FrameGrid",frameGrid);
	gtk_widget_set_sensitive(frameGrid, TRUE);
	/* OK / Cancel buttons */
	hbox = create_hbox_false(vboxwin);
	gtk_widget_realize(Win);
	button = create_button(Win,_("OK"));
	gtk_box_pack_end (GTK_BOX( hbox), button, FALSE, TRUE, 3);
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_widget_grab_default(button);
	gtk_widget_show (button);
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)apply_spatial_overlapij,G_OBJECT(Win));
	button = create_button(Win,_("Cancel"));
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_box_pack_end (GTK_BOX( hbox), button, FALSE, TRUE, 3);
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)delete_child, G_OBJECT(Win));
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)gtk_widget_destroy,G_OBJECT(Win));
	gtk_widget_show (button);
	gtk_widget_show_all (Win);
	/* preselect HOMO and LUMO (or the first two orbitals) */
	if(NAlphaOcc-1>=0)
	{
		select_row(alphaList,NAlphaOcc-1);
		if(NAlphaOcc+1<=NOrb) select_row(alphaList,NAlphaOcc);
	}
	else
	{
		select_row(alphaList,0);
		if(2<=NOrb) select_row(alphaList,1);
	}
}
/************************************************************************************************************/
/* Copy the calculated charges attached to the dialog window (under the
 * "Charges" key) onto the molecule's atoms, then refresh the GL view. */
static void setPartialChargesToCalculated(GtkWidget *win)
{
	gdouble* calculatedCharges = NULL;
	gint center;

	if(!GTK_IS_WIDGET(win)) return;
	calculatedCharges = g_object_get_data(G_OBJECT (win), "Charges");
	if(calculatedCharges == NULL) return;
	for(center = 0; center < Ncenters; center++)
		GeomOrb[center].partialCharge = calculatedCharges[center];
	glarea_rafresh(GLArea);
}
/************************************************************************************************************/
/* Destroy the calculated-charges dialog and release the charge array
 * attached to it under the "Charges" key. */
static void destroyCalculatedChargesDlg(GtkWidget *win)
{
	gdouble* charges = NULL;
	if(!GTK_IS_WIDGET(win)) return;
	charges = g_object_get_data(G_OBJECT (win), "Charges");
	g_free(charges); /* g_free(NULL) is a no-op, no guard needed */
	/* clear the stored pointer so no callback can reach the freed array
	 * (previously a dangling "Charges" entry survived the free) */
	g_object_set_data(G_OBJECT (win), "Charges", NULL);
	delete_child(win);
	/* delete_child may already have destroyed the widget; re-check */
	if(GTK_IS_WIDGET(win)) gtk_widget_destroy(win);
}
/********************************************************************************/
/* Build and show a modal dialog displaying 'message' (the formatted charges)
 * with two action buttons: one copies the charges onto the molecule
 * (setPartialChargesToCalculated), the other closes the dialog.
 * Ownership: 'charges' is attached to the dialog under the "Charges" key and
 * is freed by destroyCalculatedChargesDlg when the dialog is closed.
 * Returns the dialog widget. */
static GtkWidget* showCalculatedChargesDlg(gchar *message,gchar *title,gdouble* charges)
{
	GtkWidget *dlgWin = NULL;
	GtkWidget *frame;
	GtkWidget *vboxframe;
	GtkWidget *txtWid;
	GtkWidget *button;
	dlgWin = gtk_dialog_new();
	gtk_widget_realize(GTK_WIDGET(dlgWin));
	gtk_window_set_title(GTK_WINDOW(dlgWin),title);
	gtk_window_set_position(GTK_WINDOW(dlgWin),GTK_WIN_POS_CENTER);
	gtk_window_set_modal (GTK_WINDOW (dlgWin), TRUE);
	gtk_window_set_transient_for(GTK_WINDOW(dlgWin),GTK_WINDOW(PrincipalWindow));
	/* window-manager close must also free the attached charge array */
	g_signal_connect(G_OBJECT(dlgWin), "delete_event", (GCallback)destroyCalculatedChargesDlg, NULL);
	frame = gtk_frame_new (NULL);
	gtk_frame_set_shadow_type( GTK_FRAME(frame),GTK_SHADOW_ETCHED_OUT);
	gtk_container_set_border_width (GTK_CONTAINER (frame), 5);
	gtk_box_pack_start( GTK_BOX(GTK_DIALOG(dlgWin)->vbox), frame,TRUE,TRUE,0);
	gtk_widget_show (frame);
	vboxframe = create_vbox(frame);
	/* scrollable text area holding the charges report */
	txtWid = create_text_widget(vboxframe,NULL,&frame);
	if(message) gabedit_text_insert (GABEDIT_TEXT(txtWid), NULL, NULL, NULL,message,-1);
	gtk_box_set_homogeneous (GTK_BOX( GTK_DIALOG(dlgWin)->action_area), FALSE);
	button = create_button(dlgWin,_("Partial charges of molecule <= Calculated charges"));
	gtk_box_pack_end (GTK_BOX( GTK_DIALOG(dlgWin)->action_area), button, FALSE, TRUE, 5);
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_widget_grab_default(button);
	g_signal_connect_swapped(G_OBJECT(button), "clicked", (GCallback)setPartialChargesToCalculated, GTK_OBJECT(dlgWin));
	button = create_button(dlgWin,"Close");
	gtk_box_pack_end (GTK_BOX( GTK_DIALOG(dlgWin)->action_area), button, FALSE, TRUE, 5);
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_widget_grab_default(button);
	g_signal_connect_swapped(G_OBJECT(button), "clicked", (GCallback)destroyCalculatedChargesDlg, GTK_OBJECT(dlgWin));
	add_button_windows(title,dlgWin);
	gtk_window_set_default_size (GTK_WINDOW(dlgWin), (gint)(ScreenHeight*0.6), (gint)(ScreenHeight*0.5));
	gtk_widget_show_all(dlgWin);
	/* the dialog takes ownership of 'charges' from here on */
	g_object_set_data(G_OBJECT (dlgWin), "Charges",charges);
	return dlgWin;
}
/********************************************************************************/
/* Compute Mulliken atomic charges from the occupied-orbital coefficients and
 * the AO overlap integrals, then show them in a dialog that can copy them
 * onto the molecule.
 * Ownership: 'charges' is handed to the dialog (freed there); on
 * cancellation it is freed here (fix: it was previously leaked). */
void compute_mulliken_charges()
{
	gint i,k,l;
	gchar* result = NULL;
	gdouble* charges = NULL;
	gchar* tmp = NULL;
	gdouble o;
	gint nAll = 0;
	gint delta = 0;
	gint pos = 0;
	gdouble scal;
	gchar str[BSIZE];
	gint kk=0;

	if(Ncenters<1) return;
	/* need either explicit AOs, or symbolic AOs with precomputed overlaps */
	if(!AOrb && (!SAOrb || !SOverlaps)) return;
	destroy_win_list();
	sprintf(str,_("Computing of mulliken charges... Please wait"));
	setTextInProgress(str);
	/* progress bar advances in 1% steps over the NAOrb*(NAOrb+1)/2 AO pairs */
	scal = 0.01;
	delta = (gint)(NAOrb*(NAOrb+1)/2*scal);
	if(delta<1) delta = 1;
	pos = delta;
	charges = g_malloc(Ncenters*sizeof(gdouble));
	/* start from nuclear charges; electronic population is subtracted below */
	for(i=0;i<Ncenters;i++) charges[i] = GeomOrb[i].nuclearCharge;
	progress_orb_txt(0,str,TRUE);
	kk = 0;
	for(k=0;k<NAOrb;k++)
	{
		gint ic = (AOrb)?AOrb[k].NumCenter:SAOrb[k].NumCenter;
		for(l=0;l<=k;l++)
		{
			gint jc = (AOrb)?AOrb[l].NumCenter:SAOrb[l].NumCenter;
			if(CancelCalcul) break;
			/* overlap <k|l>: computed on the fly, or read from the table */
			if(AOrb) o = overlapCGTF(&AOrb[k],&AOrb[l]);
			else o = SOverlaps[kk++];
			for(i=0;i<NAlphaOcc;i++) charges[ic] -= OccAlphaOrbitals[i]*CoefAlphaOrbitals[i][k]*CoefAlphaOrbitals[i][l]*o;
			for(i=0;i<NBetaOcc;i++) charges[ic] -= OccBetaOrbitals[i]*CoefBetaOrbitals[i][k]*CoefBetaOrbitals[i][l]*o;
			if(k!=l)
			{
				/* off-diagonal pair contributes to both centers */
				for(i=0;i<NAlphaOcc;i++) charges[jc] -= OccAlphaOrbitals[i]*CoefAlphaOrbitals[i][k]*CoefAlphaOrbitals[i][l]*o;
				for(i=0;i<NBetaOcc;i++) charges[jc] -= OccBetaOrbitals[i]*CoefBetaOrbitals[i][k]*CoefBetaOrbitals[i][l]*o;
			}
			nAll++;
			if(nAll>=pos)
			{
				pos += delta;
				progress_orb_txt(scal,str,FALSE);
			}
		}
	}
	progress_orb_txt(0," ",TRUE);
	/* 100 chars per atom line is ample for "Atom# %d : %lf\n" */
	result = g_malloc(Ncenters*100*sizeof(gchar));
	tmp = g_malloc(BSIZE*sizeof(gchar));
	sprintf(result," Mulliken charges\n");
	setTextInProgress(_("Preparation of text to show... Please wait"));
	for(i=0;i<Ncenters;i++)
	{
		if(CancelCalcul) break;
		sprintf(tmp,"Atom# %d : %lf\n",i+1,charges[i]);
		strcat(result,tmp);
	}
	g_free(tmp);
	progress_orb_txt(0," ",TRUE);
	if(result && !CancelCalcul)
	{
		/* the dialog takes ownership of 'charges' */
		GtkWidget* message = showCalculatedChargesDlg(result,"Mulliken charges",charges);
		gtk_window_set_modal (GTK_WINDOW (message), TRUE);
		gtk_window_set_transient_for(GTK_WINDOW(message),GTK_WINDOW(PrincipalWindow));
	}
	else g_free(charges); /* fix: charges leaked when the calculation was cancelled */
	g_free(result);
}
/************************************************************************************************************/
/* Rebuild the molecule's bond list from the calculated bond-order matrix
 * attached to the dialog window (key "BondOrders"): orders that round to
 * 1, 2 or 3 become single/double/triple bonds; other pairs may still get a
 * hydrogen bond when ShowHBondOrb is enabled.
 * (The triplicated allocation code of the original is folded into one
 * path, and the dead `if(i>j)` branch — unreachable since j starts at
 * i+1 — is removed.) */
static void setBondOrdersToCalculated(GtkWidget *win)
{
	gint i;
	gint j;
	gdouble* bondOrders = NULL;
	if(GTK_IS_WIDGET(win)) bondOrders = g_object_get_data(G_OBJECT (win), "BondOrders");
	if(!bondOrders) return;
	freeBondsOrb();
	if(Ncenters<1) return ;
	for(i = 0;i<Ncenters;i++)
	{
		for(j=i+1;j<Ncenters;j++)
		{
			/* packed upper-triangular index; j>i always holds here */
			gint ii = i*Ncenters + j - i*(i+1)/2;
			gint order = (gint)(bondOrders[ii]+0.5);
			if(order>=1 && order<=3)
			{
				BondType* A=g_malloc(sizeof(BondType));
				A->n1 = i;
				A->n2 = j;
				if(order==1) A->bondType = GABEDIT_BONDTYPE_SINGLE;
				else if(order==2) A->bondType = GABEDIT_BONDTYPE_DOUBLE;
				else A->bondType = GABEDIT_BONDTYPE_TRIPLE;
				BondsOrb = g_list_append(BondsOrb,A);
			}
			else if(ShowHBondOrb && hbonded(i,j))
			{
				BondType* A=g_malloc(sizeof(BondType));
				A->n1 = i;
				A->n2 = j;
				A->bondType = GABEDIT_BONDTYPE_HYDROGEN;
				BondsOrb = g_list_append(BondsOrb,A);
			}
		}
	}
	RebuildGeom = TRUE;
	glarea_rafresh(GLArea);
}
/************************************************************************************************************/
/* Destroy the bond-orders dialog and release the bond-order array attached
 * to it under the "BondOrders" key. */
static void destroyCalculatedBondOrdersDlg(GtkWidget *win)
{
	gdouble* bondOrders = NULL;
	if(!GTK_IS_WIDGET(win)) return;
	bondOrders = g_object_get_data(G_OBJECT (win), "BondOrders");
	g_free(bondOrders); /* g_free(NULL) is a no-op, no guard needed */
	/* clear the stored pointer so nothing can reach the freed array
	 * (previously a dangling "BondOrders" entry survived the free) */
	g_object_set_data(G_OBJECT (win), "BondOrders", NULL);
	delete_child(win);
	/* delete_child may already have destroyed the widget; re-check */
	if(GTK_IS_WIDGET(win)) gtk_widget_destroy(win);
}
/********************************************************************************/
/* Build and show a modal dialog displaying 'message' (the formatted bond
 * orders) with two action buttons: one rebuilds the molecule's bond list
 * from the calculated orders (setBondOrdersToCalculated), the other closes
 * the dialog.
 * Ownership: 'bondOrders' is attached to the dialog under the "BondOrders"
 * key and is freed by destroyCalculatedBondOrdersDlg.
 * Returns the dialog widget. */
static GtkWidget* showCalculatedBondOrdersDlg(gchar *message,gchar *title,gdouble* bondOrders)
{
	GtkWidget *dlgWin = NULL;
	GtkWidget *frame;
	GtkWidget *vboxframe;
	GtkWidget *txtWid;
	GtkWidget *button;
	dlgWin = gtk_dialog_new();
	gtk_widget_realize(GTK_WIDGET(dlgWin));
	gtk_window_set_title(GTK_WINDOW(dlgWin),title);
	gtk_window_set_position(GTK_WINDOW(dlgWin),GTK_WIN_POS_CENTER);
	gtk_window_set_modal (GTK_WINDOW (dlgWin), TRUE);
	gtk_window_set_transient_for(GTK_WINDOW(dlgWin),GTK_WINDOW(PrincipalWindow));
	/* window-manager close must also free the attached array */
	g_signal_connect(G_OBJECT(dlgWin), "delete_event", (GCallback)destroyCalculatedBondOrdersDlg, NULL);
	frame = gtk_frame_new (NULL);
	gtk_frame_set_shadow_type( GTK_FRAME(frame),GTK_SHADOW_ETCHED_OUT);
	gtk_container_set_border_width (GTK_CONTAINER (frame), 5);
	gtk_box_pack_start( GTK_BOX(GTK_DIALOG(dlgWin)->vbox), frame,TRUE,TRUE,0);
	gtk_widget_show (frame);
	vboxframe = create_vbox(frame);
	/* scrollable text area holding the bond-order report */
	txtWid = create_text_widget(vboxframe,NULL,&frame);
	if(message) gabedit_text_insert (GABEDIT_TEXT(txtWid), NULL, NULL, NULL,message,-1);
	gtk_box_set_homogeneous (GTK_BOX( GTK_DIALOG(dlgWin)->action_area), FALSE);
	button = create_button(dlgWin,_("Multiple bonds <= Calculated bondOrders"));
	gtk_box_pack_end (GTK_BOX( GTK_DIALOG(dlgWin)->action_area), button, FALSE, TRUE, 5);
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_widget_grab_default(button);
	g_signal_connect_swapped(G_OBJECT(button), "clicked", (GCallback)setBondOrdersToCalculated, GTK_OBJECT(dlgWin));
	button = create_button(dlgWin,"Close");
	gtk_box_pack_end (GTK_BOX( GTK_DIALOG(dlgWin)->action_area), button, FALSE, TRUE, 5);
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_widget_grab_default(button);
	g_signal_connect_swapped(G_OBJECT(button), "clicked", (GCallback)destroyCalculatedBondOrdersDlg, GTK_OBJECT(dlgWin));
	add_button_windows(title,dlgWin);
	gtk_window_set_default_size (GTK_WINDOW(dlgWin), (gint)(ScreenHeight*0.6), (gint)(ScreenHeight*0.5));
	gtk_widget_show_all(dlgWin);
	/* the dialog takes ownership of 'bondOrders' from here on */
	g_object_set_data(G_OBJECT (dlgWin), "BondOrders",bondOrders);
	return dlgWin;
}
/********************************************************************************/
void compute_bondOrders()
{
gint i,j,k,l,m;
gchar* result = NULL;
gdouble* bondOrders = NULL;
gchar* tmp = NULL;
gdouble o;
gint nAll = 0;
gint delta = 0;
gint pos = 0;
gdouble scal;
gchar str[BSIZE];
gdouble** S = NULL;
gdouble** Pa = NULL;
gdouble** Pb = NULL;
gdouble** PS = NULL;
gint n2 = Ncenters*(Ncenters+1)/2;
gint kk;
if(Ncenters<1) return;
if(!AOrb && (!SAOrb || !SOverlaps)) return;
destroy_win_list();
sprintf(str,_("Computing of bond order matrix... Please wait"));
setTextInProgress(str);
scal = 0.01;
delta = (gint)(NAOrb*(NAOrb+1)/2*scal);
if(delta<1) delta = 1;
pos = delta;
bondOrders = g_malloc(n2*sizeof(gdouble));
for(i=0;i<n2;i++) bondOrders[i] = 0;
S = g_malloc(NAOrb*sizeof(gdouble*));
for(i=0;i<NAOrb;i++) S[i] = g_malloc(NAOrb*sizeof(gdouble));
for(i=0;i<NAOrb;i++)
for(j=0;j<NAOrb;j++) S[i][j] = 0;
Pa = g_malloc(NAOrb*sizeof(gdouble*));
for(i=0;i<NAOrb;i++) Pa[i] = g_malloc(NAOrb*sizeof(gdouble));
for(i=0;i<NAOrb;i++)
for(j=0;j<NAOrb;j++) Pa[i][j] = 0;
Pb = g_malloc(NAOrb*sizeof(gdouble*));
for(i=0;i<NAOrb;i++) Pb[i] = g_malloc(NAOrb*sizeof(gdouble));
for(i=0;i<NAOrb;i++)
for(j=0;j<NAOrb;j++) Pb[i][j] = 0;
PS = g_malloc(NAOrb*sizeof(gdouble*));
for(i=0;i<NAOrb;i++) PS[i] = g_malloc(NAOrb*sizeof(gdouble));
for(i=0;i<NAOrb;i++)
for(j=0;j<NAOrb;j++) PS[i][j] = 0;
progress_orb_txt(0,str,TRUE);
kk = 0;
for(k=0;k<NAOrb;k++)
{
for(l=0;l<=k;l++)
{
double s = 0;
if(CancelCalcul) break;
if(AOrb) o = overlapCGTF(&AOrb[k],&AOrb[l]);
else o = SOverlaps[kk++];
S[k][l] = o;
if(k!=l) S[l][k] = S[k][l];
s = 0;
for(i=0;i<NAOrb;i++)
s += OccAlphaOrbitals[i]*CoefAlphaOrbitals[i][k]*CoefAlphaOrbitals[i][l];
Pa[k][l] += s;
if(k!=l) Pa[l][k] += s;
s = 0;
for(i=0;i<NAOrb;i++)
s += OccBetaOrbitals[i]*CoefBetaOrbitals[i][k]*CoefBetaOrbitals[i][l];
Pb[k][l] += s;
if(k!=l) Pb[l][k] += s;
nAll++;
if(nAll>=pos)
{
pos += delta;
progress_orb_txt(scal,str,FALSE);
}
}
}
for(k=0;k<NAOrb;k++)
for(l=0;l<NAOrb;l++)
{
PS[k][l] = 0;
for(m=0;m<NAOrb;m++) PS[k][l] += Pa[k][m]*S[m][l];
}
/*
printf("Density matrix alpha\n");
for(k=0;k<NAOrb;k++) {for(l=0;l<=k;l++) printf("%f ",PS[k][l]); printf("\n");}
*/
double s1 = 0;
for(k=0;k<NAOrb;k++)
{
gint i = (AOrb)?AOrb[k].NumCenter:SAOrb[k].NumCenter;
for(l=0;l<NAOrb;l++)
{
gint j = (AOrb)?AOrb[l].NumCenter:SAOrb[l].NumCenter;
gint ii = i*Ncenters + j - i*(i+1)/2;
if(i>j) ii = j*Ncenters + i - j*(j+1)/2;
bondOrders[ii] += PS[k][l]*PS[l][k];
}
/* printf(" k %d %f\n",i, PS[k][k]);*/
s1 += PS[k][k];
}
/* printf(" s1 = %f\n",s1);*/
for(k=0;k<NAOrb;k++)
for(l=0;l<NAOrb;l++)
{
PS[k][l] = 0;
for(m=0;m<NAOrb;m++) PS[k][l] += Pb[k][m]*S[m][l];
}
/*
printf("Density matrix beta\n");
for(k=0;k<NAOrb;k++) {for(l=0;l<=k;l++) printf("%f ",2*PS[k][l]); printf("\n");}
*/
double s2 = 0;
for(k=0;k<NAOrb;k++)
{
gint i = (AOrb)?AOrb[k].NumCenter:SAOrb[k].NumCenter;
for(l=0;l<NAOrb;l++)
{
gint j = (AOrb)?AOrb[l].NumCenter:SAOrb[l].NumCenter;
gint ii = i*Ncenters + j - i*(i+1)/2;
if(i>j) ii = j*Ncenters + i - j*(j+1)/2;
bondOrders[ii] += PS[k][l]*PS[l][k];
}
/* printf(" k %d %f\n",i, PS[k][k]);*/
s2 += PS[k][k];
}
/* printf(" s2 = %f\n",s2);*/
progress_orb_txt(0," ",TRUE);
for(i=0;i<NAOrb;i++) g_free(S[i]);
g_free(S);
for(i=0;i<NAOrb;i++) g_free(Pa[i]);
g_free(Pa);
for(i=0;i<NAOrb;i++) g_free(Pb[i]);
g_free(Pb);
for(i=0;i<NAOrb;i++) g_free(PS[i]);
g_free(PS);
result = g_malloc(n2*100*sizeof(gchar));
tmp = g_malloc(BSIZE*sizeof(gchar));
sprintf(result," BondOrders\n");
setTextInProgress(_("Preparation of text to show... Please wait"));
for(i=0;i<Ncenters;i++)
for(j=i+1;j<Ncenters;j++)
{
gint ii = i*Ncenters + j - i*(i+1)/2;
if(i>j) ii = j*Ncenters + i - j*(j+1)/2;
if(CancelCalcul) break;
sprintf(tmp,"Bond %d-%d : %lf\n",i+1,j+1,bondOrders[ii]);
strcat(result,tmp);
if(CancelCalcul) break;
}
g_free(tmp);
progress_orb_txt(0," ",TRUE);
if(result && !CancelCalcul)
{
GtkWidget* message = showCalculatedBondOrdersDlg(result,"Bond orders ",bondOrders);
gtk_window_set_modal (GTK_WINDOW (message), TRUE);
gtk_window_set_transient_for(GTK_WINDOW(message),GTK_WINDOW(PrincipalWindow));
}
g_free(result);
}
/********************************************************************************/
/* Show a modal error box saying the transition-properties file 'fileName'
 * could not be read. */
static void messageErrorTrans(gchar* fileName)
{
	gchar buffer[BSIZE];
	/* snprintf: fileName comes from a file chooser and can be arbitrarily
	 * long; the previous sprintf could overflow the fixed-size buffer */
	snprintf(buffer,BSIZE,_("Sorry, I can not read transition properties from '%s' file\n"),fileName);
	Message(buffer,_("Error"),TRUE);
}
/********************************************************************************/
/*
static gboolean read_tansition_properties(GabeditFileChooser *SelecFile, gint response_id)
{
gchar t[BSIZE];
gchar type1[20];
gchar type2[20];
gint i1;
gint i2;
gdouble coef;
gboolean OK = TRUE;
gint numberOfTransitions = 0;
gint* fromI = NULL;
gint* toI = NULL;
gchar** fromType = NULL;
gchar** toType = NULL;
gdouble* coefficients = NULL;
gchar *FileName;
FILE *fd;
int ne = 0;
if(response_id != GTK_RESPONSE_OK) return FALSE;
FileName = gabedit_file_chooser_get_current_file(SelecFile);
fd = FOpen(FileName, "rb");
if(!fd) return FALSE;
while(!feof(fd))
{
if(!fgets(t,BSIZE,fd))break;
ne = sscanf(t,"%d %s %d %s %lf",&i1,type1, &i2, type2, &coef);
if(ne==5 && i1<=NAOrb && i2<=NAOrb && i1>0 && i2>0)
{
numberOfTransitions++;
coefficients = g_realloc(coefficients, numberOfTransitions*sizeof(gdouble));
fromI = g_realloc(fromI, numberOfTransitions*sizeof(gint));
toI = g_realloc(toI, numberOfTransitions*sizeof(gint));
fromType = g_realloc(fromType, numberOfTransitions*sizeof(gchar*));
toType = g_realloc(toType, numberOfTransitions*sizeof(gchar*));
coefficients[numberOfTransitions-1] = coef;
fromI[numberOfTransitions-1] = i1;
toI[numberOfTransitions-1] = i2;
fromType[numberOfTransitions-1] = g_strdup(type1);
toType[numberOfTransitions-1] = g_strdup(type2);
printf("t=%s\n",t);
}
else { OK= FALSE; break;}
}
if(numberOfTransitions>0 && OK)
{
//createIRSpectrumWin(numberOfFrequencies, frequencies, intensities);
}
else
{
OK = FALSE;
messageErrorTrans(FileName);
}
if(coefficients) g_free(coefficients);
if(fromType)
{
gint i;
for(i=0;i<numberOfTransitions;i++) if(fromType[i]) g_free(fromType[i]);
g_free(fromType);
}
if(toType)
{
gint i;
for(i=0;i<numberOfTransitions;i++) if(toType[i]) g_free(toType[i]);
g_free(toType);
}
if(fromI) g_free(fromI);
if(toI) g_free(toI);
fclose(fd);
return OK;
}
*/
/********************************************************************************/
/*
void lambda_diagnostic_dlg()
{
GtkWidget* filesel =
file_chooser_open(read_tansition_properties,
_("Read transition properties from a sample file (5 columns : num1 type(alpha or beta) num2 type coefficient)"),
GABEDIT_TYPEFILE_TXT,GABEDIT_TYPEWIN_OTHER);
gtk_window_set_modal (GTK_WINDOW (filesel), TRUE);
}
*/
/********************************************************************************/
/* OK-button callback of the lambda-diagnostic dialog.
 * Reads the box/grid axes, limits and point counts from the dialog entries,
 * parses the transition file selected in the file-chooser button, then
 * computes the lambda diagnostic (Peach et al., J. Chem. Phys. 128, 044118
 * (2008)) from per-transition spatial overlaps and shows the result.
 * Fixes vs. the previous version:
 *  - fromType/toType are gint arrays: realloc with sizeof(gint), not sizeof(gchar*)
 *  - the data file is now closed on the parse-error path (fd leak)
 *  - the 3-column fallback is accepted only when sscanf really parsed 3 fields
 *    (previously ne was forced to 5 and stale values could be accepted) */
static void apply_lambda_diagnostic(GtkWidget *Win,gpointer data)
{
	GtkWidget** entriestmp = NULL;
	G_CONST_RETURN gchar* temp;
	gchar* dump;
	gint i;
	gint j;
	GridLimits limitstmp;
	gint NumPointstmp[3];
	GtkWidget *entries[3][6];
	gdouble V[3][3];
	GtkWidget* buttonFileSelector = NULL;
	gdouble integ[3], normi, normj, overlap;
	gchar* result = NULL;
	gchar t[BSIZE];
	gchar type1[20];
	gchar type2[20];
	gint i1;
	gint i2;
	gdouble coef;
	gboolean OK = TRUE;
	gint numberOfTransitions = 0;
	gint* fromI = NULL;
	gint* toI = NULL;
	gint* fromType = NULL;
	gint* toType = NULL;
	gdouble* coefficients = NULL;
	gchar *FileName;
	FILE *fd;
	int ne = 0;

	if(GTK_IS_WIDGET(Win))
	{
		entriestmp = (GtkWidget **)g_object_get_data(G_OBJECT (Win), "Entries");
		buttonFileSelector = g_object_get_data (G_OBJECT (Win), "ButtonFileSelector");
	}
	else return;
	if(entriestmp==NULL) return;
	if(!buttonFileSelector) return;
	/* entries is stored flat: 3 axes x (3 direction components, min, max, npoints) */
	for(i=0;i<3;i++)
	for(j=0;j<6;j++)
		entries[i][j] = entriestmp[i*6+j];
	/* read min/max limits (columns 3,4) and the number of points (column 5) */
	for(i=0;i<3;i++)
	{
		for(j=3;j<5;j++)
		{
			temp = gtk_entry_get_text(GTK_ENTRY(entries[i][j]));
			dump = NULL;
			if(temp && strlen(temp)>0)
			{
				dump = g_strdup(temp);
				delete_first_spaces(dump);
				delete_last_spaces(dump);
			}
			if(dump && strlen(dump)>0 && this_is_a_real(dump))
			{
				limitstmp.MinMax[j-3][i] = atof(dump);
			}
			else
			{
				GtkWidget* message = Message(_("Error : an entry is not a float "),_("Error"),TRUE);
				gtk_window_set_modal (GTK_WINDOW (message), TRUE);
				return;
			}
			if(dump) g_free(dump);
		}
		temp = gtk_entry_get_text(GTK_ENTRY(entries[i][5]));
		NumPointstmp[i] = atoi(temp);
		if(NumPointstmp[i] <=2)
		{
			GtkWidget* message = Message(_("Error : The number of points should be > 2. "),_("Error"),TRUE);
			gtk_window_set_modal (GTK_WINDOW (message), TRUE);
			return;
		}
	}
	for(i=0;i<3;i++)
	{
		if( limitstmp.MinMax[0][i]> limitstmp.MinMax[1][i])
		{
			GtkWidget* message = Message(_("Error : The minimal value should be smaller than the maximal value "),_("Error"),TRUE);
			gtk_window_set_modal (GTK_WINDOW (message), TRUE);
			return;
		}
	}
	/* read the three grid direction vectors (columns 0..2) */
	for(i=0;i<3;i++)
	{
		for(j=0;j<3;j++)
		{
			V[i][j] = 0;
			temp = gtk_entry_get_text(GTK_ENTRY(entries[i][j]));
			dump = NULL;
			if(temp && strlen(temp)>0)
			{
				dump = g_strdup(temp);
				delete_first_spaces(dump);
				delete_last_spaces(dump);
			}
			if(dump && strlen(dump)>0 && this_is_a_real(dump))
			{
				V[i][j] = atof(dump);
			}
			else
			{
				GtkWidget* message = Message(_("Error : an entry is not a float "),_("Error"),TRUE);
				gtk_window_set_modal (GTK_WINDOW (message), TRUE);
				return;
			}
			if(dump) g_free(dump);
		}
	}
	/* normalize each direction vector; reject null vectors */
	for(i=0;i<3;i++)
	{
		gdouble norm = 0.0;
		for(j=0;j<3;j++)
			norm += V[i][j]*V[i][j];
		if(fabs(norm)<1e-8)
		{
			GtkWidget* message = Message(_("Error : the norm is equal to 0 "),_("Error"),TRUE);
			gtk_window_set_modal (GTK_WINDOW (message), TRUE);
			return;
		}
		for(j=0;j<3;j++)
			V[i][j] /= sqrt(norm);
	}
	/* commit the validated grid to the globals used by the cube machinery */
	for(j=0;j<3;j++) originOfCube[j] = 0;
	for(j=0;j<3;j++) firstDirection[j] = V[0][j];
	for(j=0;j<3;j++) secondDirection[j] = V[1][j];
	for(j=0;j<3;j++) thirdDirection[j] = V[2][j];
	for(i=0;i<3;i++)
	{
		NumPoints[i] =NumPointstmp[i] ;
		for(j=0;j<2;j++)
			limits.MinMax[j][i] =limitstmp.MinMax[j][i];
	}
	CancelCalcul = FALSE;
	FileName = gabedit_file_chooser_get_current_file(GABEDIT_FILE_CHOOSER(buttonFileSelector));
	fd = FOpen(FileName, "rb");
	if(!fd)
	{
		GtkWidget* message = Message(_("I cannot open the data file "),_("Error"),TRUE);
		gtk_window_set_modal (GTK_WINDOW (message), TRUE);
		return;
	}
	/* parse one transition per line: "i1 spin1 i2 spin2 coef", with a
	 * 3-column "i1 i2 coef" fallback (spins default to alpha, coefficient
	 * scaled by sqrt(2) for the closed-shell case) */
	while(!feof(fd))
	{
		gdouble scale = 1.0;
		if(!fgets(t,BSIZE,fd))break;
		ne = sscanf(t,"%d %s %d %s %lf",&i1,type1, &i2, type2, &coef);
		if(ne!=5)
		{
			ne = sscanf(t,"%d %d %lf",&i1, &i2, &coef);
			if(ne==3)
			{
				ne = 5;
				sprintf(type1,"A");
				sprintf(type2,"A");
				scale = sqrt(2.0);
			}
		}
		if(ne==5 && i1<=NAOrb && i2<=NAOrb && i1>0 && i2>0)
		{
			numberOfTransitions++;
			coefficients = g_realloc(coefficients, numberOfTransitions*sizeof(gdouble));
			fromI = g_realloc(fromI, numberOfTransitions*sizeof(gint));
			toI = g_realloc(toI, numberOfTransitions*sizeof(gint));
			fromType = g_realloc(fromType, numberOfTransitions*sizeof(gint));
			toType = g_realloc(toType, numberOfTransitions*sizeof(gint));
			coefficients[numberOfTransitions-1] = coef*scale;
			/* convert 1-based orbital numbers to 0-based indices */
			fromI[numberOfTransitions-1] = i1-1;
			toI[numberOfTransitions-1] = i2-1;
			/* spin encoding: 1 = alpha, 2 = beta */
			fromType[numberOfTransitions-1] = 1;
			toType[numberOfTransitions-1] = 1;
			if(strstr(type1,"B") || strstr(type1,"b")) fromType[numberOfTransitions-1] = 2;
			if(strstr(type2,"B") || strstr(type2,"b")) toType[numberOfTransitions-1] = 2;
		}
		else { OK= FALSE; break;}
	}
	fclose(fd); /* close in all cases (was leaked on the error path) */
	if(numberOfTransitions==0 || !OK)
	{
		messageErrorTrans(FileName);
		if(coefficients) g_free(coefficients);
		if(fromType) g_free(fromType);
		if(toType) g_free(toType);
		if(fromI) g_free(fromI);
		if(toI) g_free(toI);
		return;
	}
	/* computing */
	{
		gint typeOrbi = 1;
		gint typeOrbj = 1;
		gdouble lambda = 0.0;
		gdouble sum = 0.0;
		gdouble cc = 0.0;
		gchar* old;
		delete_child(Win);
		for(i=0;i<numberOfTransitions;i++)
		{
			gchar* tmp = NULL;
			gint ii = fromI[i];
			gint jj = toI[i];
			typeOrbi = fromType[i];
			typeOrbj = toType[i];
			if(CancelCalcul) break;
			/* per-transition spatial overlap; integ[0] receives the overlap
			 * integral used for lambda */
			tmp = compute_spatial_overlapij( NumPoints,limits, typeOrbi, ii, typeOrbj, jj, integ, &normi, &normj, &overlap);
			if(tmp)
			{
				old = result;
				if(old)
				{
					result = g_strdup_printf("%s%s",old,tmp);
					g_free(old);
				}
				else result = g_strdup_printf("%s",tmp);
			}
			/* lambda = sum_i c_i^2 * O_i / sum_i c_i^2 */
			cc = coefficients[i]*coefficients[i];
			sum += cc;
			lambda += *integ*cc;
		}
		if(sum>0) lambda /= sum;
		old = result;
		if(old)
		{
			result = g_strdup_printf("%s\nSum = %f\nLambda = %f\n",old,sum,lambda);
			g_free(old);
		}
		else result = g_strdup_printf("Sum = %f\nLambda = %f\n",sum,lambda);
	}
	if(result && !CancelCalcul)
	{
		GtkWidget* message = MessageTxt(result,_("Result"));
		gtk_window_set_default_size (GTK_WINDOW(message),(gint)(ScreenWidth*0.8),-1);
		gtk_widget_set_size_request(message,(gint)(ScreenWidth*0.45),-1);
		gtk_window_set_transient_for(GTK_WINDOW(message),GTK_WINDOW(PrincipalWindow));
	}
	set_label_title(NULL,0,0);
	if(CancelCalcul) CancelCalcul = FALSE;
	if(coefficients) g_free(coefficients);
	if(fromType) g_free(fromType);
	if(toType) g_free(toType);
	if(fromI) g_free(fromI);
	if(toI) g_free(toI);
}
/***************************************************************************/
/* Show a modal info box describing the transition-properties file format
 * expected by the lambda diagnostic.
 * Fix: the help text described the 5 fields as "First line/second line/..."
 * although they are columns of one line; also "Forth" -> "Fourth". */
static void help_trans_prop()
{
	gchar temp[BSIZE];
	GtkWidget* win;
	sprintf(temp,
		_(" Lambda is calculated as in M.J.G. Peach et al. J. Chem. Phys. 128, 044118 (2008).\n\n"
		" You must select the file containing the transition properties. \n\n"
		" The text file must contain 5 columns by line.\n"
		" First column : an integer. The electron is excited from this orbital.\n"
		" Second column : a character B or A. The spin of electron.\n"
		" Third column : an integer. The electron is excited to this orbital.\n"
		" Fourth column : a character B or A. The spin of electron.\n"
		" Fifth column : a float. The largest coefficients in the CI expansion.\n\n"
		" Example :\n"
		" 5 B 6 B 0.401\n"
		" 4 A 7 B 0.205\n\n"
		" A text file with 3 columns by line is also accepted.\n"
		" Example :\n"
		" 5 6 0.401\n"
		" 4 7 0.205\n\n"
		)
	);
	win = Message(temp,_(" Info "),FALSE);
	gtk_window_set_modal (GTK_WINDOW (win), TRUE);
}
/********************************************************************************/
/* Build the "Lambda diagnostic" dialog: a file-chooser button for the
 * transition-properties file, a Help button, the shared Box & Grid frame,
 * and OK/Cancel buttons. OK runs apply_lambda_diagnostic on the dialog.
 * Preconditions: geometry and molecular orbitals must already be loaded. */
void lambda_diagnostic_dlg()
{
	GtkWidget *Win;
	GtkWidget *frameGrid;
	GtkWidget *hbox;
	GtkWidget *vboxall;
	GtkWidget *vboxwin;
	GtkWidget *button;
	/* GtkWidget *label;*/
	GtkWidget** entries;
	GtkWidget *buttonFileSelector;
	G_CONST_RETURN gchar* temp;
	/* widen the default grid only the first time the dialog is opened */
	static gboolean first = TRUE;
	/* guard clauses: geometry, MO coefficients and basis must be available */
	if(!GeomOrb)
	{
		Message(_("Sorry, Please read the MO before\n"),_("Error"),TRUE);
		return;
	}
	if(!CoefAlphaOrbitals)
	{
		Message(_("Sorry, Please load the MO before\n"),_("Error"),TRUE);
		return;
	}
	if(!AOrb && !SAOrb)
	{
		Message(_("Sorry, Please load the MO before\n"),_("Error"),TRUE);
		return;
	}
	if(!AOAvailable &&(TypeGrid == GABEDIT_TYPEGRID_DDENSITY || TypeGrid == GABEDIT_TYPEGRID_ADENSITY))
	{
		Message(_("Sorry, No atomic orbitals available.\nPlease use a gabedit file for load : \n"
		"Geometry, Molecular and Atomic Orbitals\n"),_("Error"),TRUE);
		return;
	}
	Win = gtk_window_new(GTK_WINDOW_TOPLEVEL);
	gtk_window_set_title(GTK_WINDOW(Win),"Lambda diagnostic");
	gtk_window_set_position(GTK_WINDOW(Win),GTK_WIN_POS_CENTER);
	gtk_container_set_border_width (GTK_CONTAINER (Win), 5);
	gtk_window_set_transient_for(GTK_WINDOW(Win),GTK_WINDOW(PrincipalWindow));
	gtk_window_set_modal (GTK_WINDOW (Win), TRUE);
	add_glarea_child(Win,"Grid ");
	vboxall = create_vbox(Win);
	vboxwin = vboxall;
	hbox = gtk_hbox_new (FALSE, 0);
	gtk_box_pack_start (GTK_BOX (vboxall), hbox, TRUE, TRUE, 0);
	/*
	label = gtk_label_new(_("File containing the transition properties :"));
	gtk_box_pack_start (GTK_BOX (hbox), label, TRUE, TRUE, 0);
	*/
	buttonFileSelector = gtk_file_chooser_button_new(_("select the file containing the transition properties(5columns : num1 type(alpha or beta) num2 type coefficient)"),
	GTK_FILE_CHOOSER_ACTION_OPEN);
	g_object_set_data (G_OBJECT (Win), "ButtonFileSelector",buttonFileSelector);
	gtk_box_pack_start (GTK_BOX (hbox), buttonFileSelector, TRUE, TRUE, 5);
	button = create_button(Win,_("Help"));
	gtk_box_pack_start (GTK_BOX (hbox), button, FALSE, FALSE, 5);
	gtk_widget_show (button);
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)help_trans_prop,G_OBJECT(Win));
	gtk_box_pack_start (GTK_BOX (vboxall), gtk_hseparator_new(), TRUE, TRUE, 5);
	frameGrid = create_grid_frame(vboxall,"Box & Grid");
	entries = (GtkWidget**) g_object_get_data (G_OBJECT (frameGrid), "Entries");
	if(first)
	{
		/* entries[3] is the first axis' minimum: scale it by 5 to enlarge
		 * the default box for overlap integration */
		temp = gtk_entry_get_text(GTK_ENTRY(entries[3]));
		if(temp && strlen(temp)>0)
		{
			/* NOTE(review): newval is never freed — small one-time leak */
			gchar* newval = g_strdup_printf("%f",atof(temp)*5);
			gtk_entry_set_text(GTK_ENTRY(entries[3]),newval);
		}
		first = FALSE;
	}
	/* expose the entries and frame to apply_lambda_diagnostic */
	g_object_set_data (G_OBJECT (Win), "Entries",entries);
	g_object_set_data (G_OBJECT (Win), "FrameGrid",frameGrid);
	gtk_widget_set_sensitive(frameGrid, TRUE);
	hbox = create_hbox_false(vboxwin);
	gtk_widget_realize(Win);
	button = create_button(Win,_("OK"));
	gtk_box_pack_end (GTK_BOX( hbox), button, FALSE, TRUE, 3);
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_widget_grab_default(button);
	gtk_widget_show (button);
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)apply_lambda_diagnostic,G_OBJECT(Win));
	button = create_button(Win,_("Cancel"));
	GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
	gtk_box_pack_end (GTK_BOX( hbox), button, FALSE, TRUE, 3);
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)delete_child, G_OBJECT(Win));
	g_signal_connect_swapped(G_OBJECT(button), "clicked",(GCallback)gtk_widget_destroy,G_OBJECT(Win));
	gtk_widget_show (button);
	gtk_widget_show_all (Win);
}
|
8952.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"
/* Array initialization. */
/* Array initialization: fill the data matrix with deterministic values
 * and set the scaling factor float_n. */
static
void init_array (int m,
		 int n,
		 DATA_TYPE *float_n,
		 DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
  int row, col;

  *float_n = 1.2;
  for (row = 0; row < m; row++) {
    for (col = 0; col < n; col++) {
      data[row][col] = ((DATA_TYPE) row * col) / M;
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data; prints the correlation
 * matrix to stderr so results can also be checked for correctness. */
static
void print_array(int m,
		 DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
  int row, col;

  for (row = 0; row < m; row++) {
    for (col = 0; col < m; col++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[row][col]);
      /* line break every 20 printed elements */
      if ((row * m + col) % 20 == 0) {
	fprintf (stderr, "\n");
      }
    }
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Computes the m x m correlation matrix of the n x m data matrix:
   column means, column standard deviations (clamped to 1.0 near zero),
   centered/reduced data, then pairwise column products.
   NOTE(review): "#P11" in num_threads(...) and "#p #p" below are autotuner
   template placeholders, not valid C/OpenMP — this file will not compile
   as-is. The bare "#pragma omp" lines likewise lack a work-sharing
   directive (presumably "for" is substituted by the generator). Left
   unchanged since this appears to be a code-generation template. */
static
void kernel_correlation(int m, int n,
			DATA_TYPE float_n,
			DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
			DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
			DATA_TYPE POLYBENCH_1D(mean,M,m),
			DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
  int i, j, j1, j2;
  DATA_TYPE eps = 0.1f;
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#pragma scop
  /* Determine mean of column vectors of input data matrix */
#pragma omp parallel private(i, j, j2) num_threads(#P11)
  {
#pragma omp
    for (j = 0; j < _PB_M; j++)
      {
	mean[j] = 0.0;
	for (i = 0; i < _PB_N; i++)
	  mean[j] += data[i][j];
	mean[j] /= float_n;
      }
    /* Determine standard deviations of column vectors of data matrix. */
#pragma omp
    for (j = 0; j < _PB_M; j++)
      {
	stddev[j] = 0.0;
	for (i = 0; i < _PB_N; i++)
	  stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
	stddev[j] /= float_n;
	stddev[j] = sqrt_of_array_cell(stddev, j);
	/* The following in an inelegant but usual way to handle
	   near-zero std. dev. values, which below would cause a zero-
	   divide. */
	stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
      }
    /* Center and reduce the column vectors. */
#pragma omp
    for (i = 0; i < _PB_N; i++)
      {
#pragma omp target teams distribute #p #p
	for (j = 0; j < _PB_M; j++)
	  {
	    data[i][j] -= mean[j];
	    data[i][j] /= sqrt(float_n) * stddev[j];
	  }
      }
    /* Calculate the m * m correlation matrix. */
#pragma omp
    for (j1 = 0; j1 < _PB_M-1; j1++)
      {
	symmat[j1][j1] = 1.0;
	for (j2 = j1+1; j2 < _PB_M; j2++)
	  {
	    symmat[j1][j2] = 0.0;
	    for (i = 0; i < _PB_N; i++)
	      symmat[j1][j2] += (data[i][j1] * data[i][j2]);
	    /* correlation matrix is symmetric: mirror the entry */
	    symmat[j2][j1] = symmat[j1][j2];
	  }
      }
  }
#pragma endscop
  /* last diagonal element is not touched by the j1 loop above */
  symmat[_PB_M-1][_PB_M-1] = 1.0;
}
/* Driver: allocate the arrays, initialize, time the correlation kernel,
 * and print the live-out matrix so dead-code elimination cannot remove
 * the computation. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int n = N;
  int m = M;
  /* Variable declaration/allocation. */
  DATA_TYPE float_n;
  POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
  POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
  POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
  POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);
  /* Initialize array(s). */
  init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel. */
  kernel_correlation (m, n, float_n,
		      POLYBENCH_ARRAY(data),
		      POLYBENCH_ARRAY(symmat),
		      POLYBENCH_ARRAY(mean),
		      POLYBENCH_ARRAY(stddev));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(data);
  POLYBENCH_FREE_ARRAY(symmat);
  POLYBENCH_FREE_ARRAY(mean);
  POLYBENCH_FREE_ARRAY(stddev);
  return 0;
}
|
ark_brusselator1D_omp.c | /*---------------------------------------------------------------
* Programmer(s): Daniel R. Reynolds @ SMU
*---------------------------------------------------------------
* LLNS/SMU Copyright Start
* Copyright (c) 2015, Southern Methodist University and
* Lawrence Livermore National Security
*
* This work was performed under the auspices of the U.S. Department
* of Energy by Southern Methodist University and Lawrence Livermore
* National Laboratory under Contract DE-AC52-07NA27344.
* Produced at Southern Methodist University and the Lawrence
* Livermore National Laboratory.
*
* All rights reserved.
* For details, see the LICENSE file.
* LLNS/SMU Copyright End
*---------------------------------------------------------------
* Example problem:
*
* The following test simulates a brusselator problem from chemical
* kinetics. This is n PDE system with 3 components, Y = [u,v,w],
* satisfying the equations,
* u_t = du*u_xx + a - (w+1)*u + v*u^2
* v_t = dv*v_xx + w*u - v*u^2
* w_t = dw*w_xx + (b-w)/ep - w*u
* for t in [0, 80], x in [0, 1], with initial conditions
* u(0,x) = a + 0.1*sin(pi*x)
* v(0,x) = b/a + 0.1*sin(pi*x)
* w(0,x) = b + 0.1*sin(pi*x),
* and with stationary boundary conditions, i.e.
* u_t(t,0) = u_t(t,1) = 0,
* v_t(t,0) = v_t(t,1) = 0,
* w_t(t,0) = w_t(t,1) = 0.
* Note: these can also be implemented as Dirichlet boundary
* conditions with values identical to the initial conditions.
*
* The spatial derivatives are computed using second-order
* centered differences, with the data distributed over N points
* on a uniform spatial grid.
*
* This program solves the problem with the DIRK method, using a
* Newton iteration with the ARKBAND band linear solver, and a
* user-supplied Jacobian routine. This example uses the OpenMP
* vector kernel, and employs OpenMP threading within the
* right-hand side and Jacobian construction functions.
*
* 100 outputs are printed at equal intervals, and run statistics
* are printed at the end.
*---------------------------------------------------------------*/
/* Header files */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <arkode/arkode.h> /* prototypes for ARKode fcts., consts. */
#include <nvector/nvector_openmp.h> /* OpenMP N_Vector types, fcts., macros */
#include <arkode/arkode_band.h> /* prototype for ARKBand solver */
#include <sundials/sundials_types.h> /* def. of type 'realtype' */
#ifdef _OPENMP
#include <omp.h> /* OpenMP functions */
#endif
/* accessor macros between (x,v) location and 1D NVector array */
#define IDX(x,v) (3*(x)+v)
/* user data structure */
typedef struct {
long int N; /* number of intervals */
int nthreads; /* number of OpenMP threads */
realtype dx; /* mesh spacing */
realtype a; /* constant forcing on u */
realtype b; /* steady-state value of w */
realtype du; /* diffusion coeff for u */
realtype dv; /* diffusion coeff for v */
realtype dw; /* diffusion coeff for w */
realtype ep; /* stiffness parameter */
} *UserData;
/* User-supplied Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
static int Jac(long int N, long int mu, long int ml,
realtype t, N_Vector y, N_Vector fy,
DlsMat J, void *user_data,
N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);
/* Private helper functions */
static int LaplaceMatrix(realtype c, DlsMat Jac, UserData udata);
static int ReactionJac(realtype c, N_Vector y, DlsMat Jac, UserData udata);
/* Private function to check function return values */
static int check_flag(void *flagvalue, char *funcname, int opt);
/* Main Program */
int main(int argc, char *argv[])
{
  /* general problem parameters */
  /* NOTE(review): final time here is 10.0, while the file header describes
     t in [0, 80] — confirm which is intended. */
  realtype T0 = RCONST(0.0); /* initial time */
  realtype Tf = RCONST(10.0); /* final time */
  int Nt = 100; /* total number of output times */
  int Nvar = 3; /* number of solution fields */
  UserData udata = NULL;
  realtype *data;
  long int N = 201; /* spatial mesh size */
  realtype a = 0.6; /* problem parameters */
  realtype b = 2.0;
  realtype du = 0.025;
  realtype dv = 0.025;
  realtype dw = 0.025;
  realtype ep = 1.0e-5; /* stiffness parameter */
  realtype reltol = 1.0e-6; /* tolerances */
  realtype abstol = 1.0e-10;
  long int NEQ, i;
  /* general problem variables */
  int flag; /* reusable error-checking flag */
  N_Vector y = NULL; /* empty vector for storing solution */
  N_Vector umask = NULL; /* empty mask vectors for viewing solution components */
  N_Vector vmask = NULL;
  N_Vector wmask = NULL;
  void *arkode_mem = NULL; /* empty ARKode memory structure */
  realtype pi, t, dTout, tout, u, v, w;
  FILE *FID, *UFID, *VFID, *WFID;
  int iout, num_threads;
  long int nst, nst_a, nfe, nfi, nsetups, nje, nfeLS, nni, ncfn, netf;
  /* allocate udata structure */
  udata = (UserData) malloc(sizeof(*udata));
  if (check_flag((void *) udata, "malloc", 2)) return 1;
  /* set the number of threads to use */
  num_threads = 1; /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS environment variable */
#endif
  /* NOTE(review): strtol result is unchecked; a non-numeric argument yields 0 threads */
  if (argc > 1) /* overwrite with command line value, if supplied */
    num_threads = strtol(argv[1], NULL, 0);
  /* store the inputs in the UserData structure */
  udata->N = N;
  udata->a = a;
  udata->b = b;
  udata->du = du;
  udata->dv = dv;
  udata->dw = dw;
  udata->ep = ep;
  udata->nthreads = num_threads;
  /* set total allocated vector length (3 fields per mesh node) */
  NEQ = Nvar*udata->N;
  /* Initial problem output */
  printf("\n1D Brusselator PDE test problem:\n");
  printf(" N = %li, NEQ = %li\n", udata->N, NEQ);
  printf(" num_threads = %i\n", num_threads);
  printf(" problem parameters: a = %g, b = %g, ep = %g\n",
      udata->a, udata->b, udata->ep);
  printf(" diffusion coefficients: du = %g, dv = %g, dw = %g\n",
      udata->du, udata->dv, udata->dw);
  printf(" reltol = %.1e, abstol = %.1e\n\n", reltol, abstol);
  /* Initialize data structures */
  y = N_VNew_OpenMP(NEQ, num_threads); /* Create vector for solution */
  if (check_flag((void *)y, "N_VNew_OpenMP", 0)) return 1;
  udata->dx = RCONST(1.0)/(N-1); /* set spatial mesh spacing */
  data = N_VGetArrayPointer(y); /* Access data array for new NVector y */
  if (check_flag((void *)data, "N_VGetArrayPointer", 0)) return 1;
  umask = N_VNew_OpenMP(NEQ, num_threads); /* Create vector masks */
  if (check_flag((void *)umask, "N_VNew_OpenMP", 0)) return 1;
  vmask = N_VNew_OpenMP(NEQ, num_threads);
  if (check_flag((void *)vmask, "N_VNew_OpenMP", 0)) return 1;
  wmask = N_VNew_OpenMP(NEQ, num_threads);
  if (check_flag((void *)wmask, "N_VNew_OpenMP", 0)) return 1;
  /* Set initial conditions into y: a small sinusoidal perturbation of
     the steady state (u,v,w) = (a, b/a, b) */
  pi = RCONST(4.0)*atan(RCONST(1.0));
  for (i=0; i<N; i++) {
    data[IDX(i,0)] = a + RCONST(0.1)*sin(pi*i*udata->dx); /* u */
    data[IDX(i,1)] = b/a + RCONST(0.1)*sin(pi*i*udata->dx); /* v */
    data[IDX(i,2)] = b + RCONST(0.1)*sin(pi*i*udata->dx); /* w */
  }
  /* Set mask array values for each solution component: a 1.0 in every
     slot belonging to that component, so N_VWL2Norm(y, mask) extracts
     a single-component norm below */
  N_VConst(0.0, umask);
  data = N_VGetArrayPointer(umask);
  if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) data[IDX(i,0)] = RCONST(1.0);
  N_VConst(0.0, vmask);
  data = N_VGetArrayPointer(vmask);
  if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) data[IDX(i,1)] = RCONST(1.0);
  N_VConst(0.0, wmask);
  data = N_VGetArrayPointer(wmask);
  if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) data[IDX(i,2)] = RCONST(1.0);
  /* Create the solver memory */
  arkode_mem = ARKodeCreate();
  if (check_flag((void *)arkode_mem, "ARKodeCreate", 0)) return 1;
  /* Call ARKodeInit to initialize the integrator memory and specify the
     right-hand side function in y'=f(t,y), the initial time T0, and
     the initial dependent variable vector y. Note: since this
     problem is fully implicit, we set f_E to NULL and f_I to f. */
  flag = ARKodeInit(arkode_mem, NULL, f, T0, y);
  if (check_flag(&flag, "ARKodeInit", 1)) return 1;
  /* Set routines */
  flag = ARKodeSetUserData(arkode_mem, (void *) udata); /* Pass udata to user functions */
  if (check_flag(&flag, "ARKodeSetUserData", 1)) return 1;
  flag = ARKodeSStolerances(arkode_mem, reltol, abstol); /* Specify tolerances */
  if (check_flag(&flag, "ARKodeSStolerances", 1)) return 1;
  /* Linear solver specification: bandwidths 4 cover the 3-variable
     coupling to the two neighboring mesh nodes */
  flag = ARKBand(arkode_mem, NEQ, 4, 4); /* Specify the band linear solver */
  if (check_flag(&flag, "ARKBand", 1)) return 1;
  flag = ARKDlsSetBandJacFn(arkode_mem, Jac); /* Set the Jacobian routine */
  if (check_flag(&flag, "ARKDlsSetBandJacFn", 1)) return 1;
  /* output spatial mesh to disk */
  /* NOTE(review): fopen results below are not checked before use */
  FID=fopen("bruss_mesh.txt","w");
  for (i=0; i<N; i++) fprintf(FID," %.16e\n", udata->dx*i);
  fclose(FID);
  /* Open output stream for results, access data arrays */
  UFID=fopen("bruss_u.txt","w");
  VFID=fopen("bruss_v.txt","w");
  WFID=fopen("bruss_w.txt","w");
  /* output initial condition to disk */
  data = N_VGetArrayPointer(y);
  if (check_flag((void *)data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) fprintf(UFID," %.16e", data[IDX(i,0)]);
  for (i=0; i<N; i++) fprintf(VFID," %.16e", data[IDX(i,1)]);
  for (i=0; i<N; i++) fprintf(WFID," %.16e", data[IDX(i,2)]);
  fprintf(UFID,"\n");
  fprintf(VFID,"\n");
  fprintf(WFID,"\n");
  /* Main time-stepping loop: calls ARKode to perform the integration, then
     prints results. Stops when the final time has been reached */
  t = T0;
  dTout = (Tf-T0)/Nt;
  tout = T0+dTout;
  printf(" t ||u||_rms ||v||_rms ||w||_rms\n");
  printf(" ----------------------------------------------\n");
  for (iout=0; iout<Nt; iout++) {
    flag = ARKode(arkode_mem, tout, y, &t, ARK_NORMAL); /* call integrator */
    if (check_flag(&flag, "ARKode", 1)) break;
    /* masked weighted norms give per-component RMS values */
    u = N_VWL2Norm(y,umask); /* access/print solution statistics */
    u = sqrt(u*u/N);
    v = N_VWL2Norm(y,vmask);
    v = sqrt(v*v/N);
    w = N_VWL2Norm(y,wmask);
    w = sqrt(w*w/N);
    printf(" %10.6f %10.6f %10.6f %10.6f\n", t, u, v, w);
    if (flag >= 0) { /* successful solve: update output time */
      tout += dTout;
      tout = (tout > Tf) ? Tf : tout;
    } else { /* unsuccessful solve: break */
      fprintf(stderr,"Solver failure, stopping integration\n");
      break;
    }
    /* output results to disk (data still points at y's array) */
    for (i=0; i<N; i++) fprintf(UFID," %.16e", data[IDX(i,0)]);
    for (i=0; i<N; i++) fprintf(VFID," %.16e", data[IDX(i,1)]);
    for (i=0; i<N; i++) fprintf(WFID," %.16e", data[IDX(i,2)]);
    fprintf(UFID,"\n");
    fprintf(VFID,"\n");
    fprintf(WFID,"\n");
  }
  printf(" ----------------------------------------------\n");
  fclose(UFID);
  fclose(VFID);
  fclose(WFID);
  /* Print some final statistics */
  flag = ARKodeGetNumSteps(arkode_mem, &nst);
  check_flag(&flag, "ARKodeGetNumSteps", 1);
  flag = ARKodeGetNumStepAttempts(arkode_mem, &nst_a);
  check_flag(&flag, "ARKodeGetNumStepAttempts", 1);
  flag = ARKodeGetNumRhsEvals(arkode_mem, &nfe, &nfi);
  check_flag(&flag, "ARKodeGetNumRhsEvals", 1);
  flag = ARKodeGetNumLinSolvSetups(arkode_mem, &nsetups);
  check_flag(&flag, "ARKodeGetNumLinSolvSetups", 1);
  flag = ARKodeGetNumErrTestFails(arkode_mem, &netf);
  check_flag(&flag, "ARKodeGetNumErrTestFails", 1);
  flag = ARKodeGetNumNonlinSolvIters(arkode_mem, &nni);
  check_flag(&flag, "ARKodeGetNumNonlinSolvIters", 1);
  flag = ARKodeGetNumNonlinSolvConvFails(arkode_mem, &ncfn);
  check_flag(&flag, "ARKodeGetNumNonlinSolvConvFails", 1);
  flag = ARKDlsGetNumJacEvals(arkode_mem, &nje);
  check_flag(&flag, "ARKDlsGetNumJacEvals", 1);
  flag = ARKDlsGetNumRhsEvals(arkode_mem, &nfeLS);
  check_flag(&flag, "ARKDlsGetNumRhsEvals", 1);
  printf("\nFinal Solver Statistics:\n");
  printf(" Internal solver steps = %li (attempted = %li)\n", nst, nst_a);
  printf(" Total RHS evals: Fe = %li, Fi = %li\n", nfe, nfi);
  printf(" Total linear solver setups = %li\n", nsetups);
  printf(" Total RHS evals for setting up the linear system = %li\n", nfeLS);
  printf(" Total number of Jacobian evaluations = %li\n", nje);
  printf(" Total number of Newton iterations = %li\n", nni);
  printf(" Total number of nonlinear solver convergence failures = %li\n", ncfn);
  printf(" Total number of error test failures = %li\n\n", netf);
  /* Clean up and return with successful completion */
  N_VDestroy_OpenMP(y); /* Free vectors */
  N_VDestroy_OpenMP(umask);
  N_VDestroy_OpenMP(vmask);
  N_VDestroy_OpenMP(wmask);
  free(udata); /* Free user data */
  ARKodeFree(&arkode_mem); /* Free integrator memory */
  return 0;
}
/*-------------------------------
* Functions called by the solver
*-------------------------------*/
/* f routine to compute the ODE RHS function f(t,y). */
/* f routine to compute the ODE RHS function f(t,y) for the brusselator
   system.  Interior nodes get the centered-difference diffusion terms
   plus the reaction terms; the boundary node derivatives are forced to
   zero (stationary Dirichlet boundaries).  Returns 0 on success, 1 if a
   vector data pointer could not be obtained.

   Fix: the original cleared ydot twice (once before and once after
   fetching the data pointers), doubling an O(NEQ) traversal for no
   effect; it is now cleared exactly once. */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)
{
  UserData udata = (UserData) user_data; /* access problem data */
  long int N = udata->N; /* set variable shortcuts */
  realtype a = udata->a;
  realtype b = udata->b;
  realtype ep = udata->ep;
  realtype du = udata->du;
  realtype dv = udata->dv;
  realtype dw = udata->dw;
  realtype dx = udata->dx;
  realtype *Ydata=NULL, *dYdata=NULL;
  realtype uconst, vconst, wconst, u, ul, ur, v, vl, vr, w, wl, wr;
  long int i;
  Ydata = N_VGetArrayPointer(y); /* access data arrays */
  if (check_flag((void *)Ydata, "N_VGetArrayPointer", 0)) return 1;
  dYdata = N_VGetArrayPointer(ydot);
  if (check_flag((void *)dYdata, "N_VGetArrayPointer", 0)) return 1;
  N_VConst(0.0, ydot); /* initialize ydot to zero (once) */
  /* iterate over domain, computing all equations */
  uconst = du/dx/dx; /* loop-invariant diffusion factors */
  vconst = dv/dx/dx;
  wconst = dw/dx/dx;
#pragma omp parallel for default(shared) private(i,u,ul,ur,v,vl,vr,w,wl,wr) schedule(static) num_threads(udata->nthreads)
  for (i=1; i<N-1; i++) {
    /* set shortcuts for this node and its two neighbors */
    u = Ydata[IDX(i,0)]; ul = Ydata[IDX(i-1,0)]; ur = Ydata[IDX(i+1,0)];
    v = Ydata[IDX(i,1)]; vl = Ydata[IDX(i-1,1)]; vr = Ydata[IDX(i+1,1)];
    w = Ydata[IDX(i,2)]; wl = Ydata[IDX(i-1,2)]; wr = Ydata[IDX(i+1,2)];
    /* u_t = du*u_xx + a - (w+1)*u + v*u^2 */
    dYdata[IDX(i,0)] = (ul - RCONST(2.0)*u + ur)*uconst + a - (w+RCONST(1.0))*u + v*u*u;
    /* v_t = dv*v_xx + w*u - v*u^2 */
    dYdata[IDX(i,1)] = (vl - RCONST(2.0)*v + vr)*vconst + w*u - v*u*u;
    /* w_t = dw*w_xx + (b-w)/ep - w*u */
    dYdata[IDX(i,2)] = (wl - RCONST(2.0)*w + wr)*wconst + (b-w)/ep - w*u;
  }
  /* enforce stationary boundaries */
  dYdata[IDX(0,0)] = dYdata[IDX(0,1)] = dYdata[IDX(0,2)] = 0.0;
  dYdata[IDX(N-1,0)] = dYdata[IDX(N-1,1)] = dYdata[IDX(N-1,2)] = 0.0;
  return 0;
}
/* Jacobian routine to compute J(t,y) = df/dy. */
/* Band Jacobian J = df/dy: start from a zero matrix, then accumulate
   the diffusion (Laplacian) contribution followed by the reaction
   contribution.  Returns 0 on success, 1 if either helper fails. */
static int Jac(long int M, long int mu, long int ml,
               realtype t, N_Vector y, N_Vector fy,
               DlsMat J, void *user_data,
               N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)
{
  UserData udata = (UserData) user_data; /* access problem data */
  SetToZero(J); /* start from an empty matrix */
  /* J += 1 * L (diffusion part) */
  if (LaplaceMatrix(RCONST(1.0), J, udata) != 0) {
    printf("Jacobian calculation error in calling LaplaceMatrix!\n");
    return 1;
  }
  /* J += 1 * dR/dy (reaction part) */
  if (ReactionJac(RCONST(1.0), y, J, udata) != 0) {
    printf("Jacobian calculation error in calling ReactionJac!\n");
    return 1;
  }
  return 0;
}
/*-------------------------------
* Private helper functions
*-------------------------------*/
/* Routine to compute the stiffness matrix from (L*y), scaled by the factor c.
We add the result into Jac and do not erase what was already there */
/* Add c * (Jacobian of the Laplacian terms) into the band matrix Jac.
   Entries are accumulated into whatever is already stored (not
   overwritten).  Only interior nodes are touched; boundary rows stay
   zero, matching the stationary boundaries enforced in f().

   Fix: the scaled diffusion factors c*d/dx/dx were recomputed for every
   matrix entry; they are loop invariants and are now hoisted. */
static int LaplaceMatrix(realtype c, DlsMat Jac, UserData udata)
{
  long int i; /* set shortcuts */
  long int N = udata->N;
  realtype dx = udata->dx;
  realtype cu = c*udata->du/dx/dx; /* hoisted loop-invariant factors */
  realtype cv = c*udata->dv/dx/dx;
  realtype cw = c*udata->dw/dx/dx;
  /* iterate over interior nodes, filling in Jacobian entries */
#pragma omp parallel for default(shared) private(i) schedule(static) num_threads(udata->nthreads)
  for (i=1; i<N-1; i++) {
    /* coupling to node i-1 */
    BAND_ELEM(Jac,IDX(i,0),IDX(i-1,0)) += cu;
    BAND_ELEM(Jac,IDX(i,1),IDX(i-1,1)) += cv;
    BAND_ELEM(Jac,IDX(i,2),IDX(i-1,2)) += cw;
    /* diagonal coupling */
    BAND_ELEM(Jac,IDX(i,0),IDX(i,0)) += -RCONST(2.0)*cu;
    BAND_ELEM(Jac,IDX(i,1),IDX(i,1)) += -RCONST(2.0)*cv;
    BAND_ELEM(Jac,IDX(i,2),IDX(i,2)) += -RCONST(2.0)*cw;
    /* coupling to node i+1 */
    BAND_ELEM(Jac,IDX(i,0),IDX(i+1,0)) += cu;
    BAND_ELEM(Jac,IDX(i,1),IDX(i+1,1)) += cv;
    BAND_ELEM(Jac,IDX(i,2),IDX(i+1,2)) += cw;
  }
  return 0;
}
/* Routine to compute the Jacobian matrix from R(y), scaled by the factor c.
We add the result into Jac and do not erase what was already there */
/* Add c * (Jacobian of the reaction terms R(y)) into Jac, accumulating
   into whatever is already stored there.  Entries are grouped by
   equation row (u_t, v_t, w_t).  Returns 0 on success, 1 if the
   solution data pointer is unavailable. */
static int ReactionJac(realtype c, N_Vector y, DlsMat Jac, UserData udata)
{
  long int N = udata->N; /* shortcuts */
  long int i;
  realtype u, v, w;
  realtype ep = udata->ep;
  realtype *Yd = N_VGetArrayPointer(y); /* access solution array */
  if (check_flag((void *)Yd, "N_VGetArrayPointer", 0)) return 1;
  /* fill in Jacobian entries at every interior node */
#pragma omp parallel for default(shared) private(i,u,v,w) schedule(static) num_threads(udata->nthreads)
  for (i=1; i<N-1; i++) {
    /* nodal values at this interior node */
    u = Yd[IDX(i,0)];
    v = Yd[IDX(i,1)];
    w = Yd[IDX(i,2)];
    /* row u_t: derivatives wrt u, v, w */
    BAND_ELEM(Jac,IDX(i,0),IDX(i,0)) += c*(RCONST(2.0)*u*v-(w+RCONST(1.0)));
    BAND_ELEM(Jac,IDX(i,0),IDX(i,1)) += c*(u*u);
    BAND_ELEM(Jac,IDX(i,0),IDX(i,2)) += c*(-u);
    /* row v_t: derivatives wrt u, v, w */
    BAND_ELEM(Jac,IDX(i,1),IDX(i,0)) += c*(w - RCONST(2.0)*u*v);
    BAND_ELEM(Jac,IDX(i,1),IDX(i,1)) += c*(-u*u);
    BAND_ELEM(Jac,IDX(i,1),IDX(i,2)) += c*(u);
    /* row w_t: derivatives wrt u and w */
    BAND_ELEM(Jac,IDX(i,2),IDX(i,0)) += c*(-w);
    BAND_ELEM(Jac,IDX(i,2),IDX(i,2)) += c*(-RCONST(1.0)/ep - u);
  }
  return 0;
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer
*/
/* Diagnostic helper shared by every SUNDIALS call in this example.
     opt == 0 : flagvalue is a pointer returned by a SUNDIALS allocator;
                error when NULL.
     opt == 1 : flagvalue points at an int return flag; error when < 0.
     opt == 2 : flagvalue is a pointer from malloc; error when NULL.
   Prints a message to stderr and returns 1 on error, 0 otherwise. */
static int check_flag(void *flagvalue, char *funcname, int opt)
{
  if (opt == 0) {
    /* SUNDIALS function returned NULL pointer - no memory allocated */
    if (flagvalue == NULL) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return 1;
    }
  } else if (opt == 1) {
    /* SUNDIALS function returned a flag; negative means failure */
    int errflag = *((int *) flagvalue);
    if (errflag < 0) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
              funcname, errflag);
      return 1;
    }
  } else if (opt == 2) {
    /* plain malloc result */
    if (flagvalue == NULL) {
      fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return 1;
    }
  }
  return 0;
}
/*---- end of file ----*/
|
desktop-server.h | # pragma once
# include "./servers.h"
namespace uplink {
struct DesktopUI;
//------------------------------------------------------------------------------
// Server specialization that buffers depth/color frames received from a
// mobile capture client and hands them to a local consumer via process(),
// while streaming a feedback image back to the client.
// Frames cycle between "empty" free lists and "filled" ready lists; the
// lists are guarded by m_mutex (but see the review notes below).
struct DesktopServer : Server
{
public:
    DesktopServer (const std::string& serviceName, int servicePort, objc_weak ServerDelegate* serverDelegate);
    ~DesktopServer ();
    // Allocates bufferSize depth/color frame slots and the feedback image
    // backing store (presumably — implemented in desktop-server.hpp).
    void init(unsigned int bufferSize, unsigned int feedbackWidth, unsigned int feedbackHeight);
public:
    //DesktopServerUI& ui() { return *_ui; }
    // Store the client's camera calibration and frame dimensions.
    void updateCalibration(const uplink::CameraCalibration& calibrationDepth, const uplink::CameraCalibration& calibrationColor,
        unsigned int depthWidth, unsigned int depthHeight, unsigned int colorWidth, unsigned int colorHeight);
    bool getCalibration(uplink::CameraCalibration& calibrationDepth, uplink::CameraCalibration& calibrationColor,
        unsigned int& depthWidth, unsigned int& depthHeight, unsigned int& colorWidth, unsigned int& colorHeight);
    bool hasCalibration() { const MutexLocker _(m_mutex); return m_bHasCalibration; }
    void setRecordPressed(bool b) { m_bRecordPressed = b; }
    bool isRecordPressed() const { return m_bRecordPressed; }
    void startRecording() { if (m_bRecordPressed) m_bIsRecording = true; }
    void stopRecording() { m_bIsRecording = false; }
    bool isRecording() const { return m_bIsRecording; }
    // Pop the oldest filled depth/color frame pair, recycling the caller's
    // previous buffers into the empty lists.  Returns (NULL, NULL) when no
    // frame is available and recording is off.
    // NOTE(review): the two empty() checks below read the lists WITHOUT
    // holding m_mutex — racy against receive(); confirm intended.
    // NOTE(review): sleep(0.01f) — POSIX sleep() takes whole seconds, so
    // 0.01f truncates to 0 (busy spin); presumably this is an uplink
    // sleep taking seconds as float — verify.
    std::pair<float*, uint8*> process(float* oldDepth, uint8* oldColor) {
        //std::cout << "[get]: " << m_depthFilledList.size() << ", " << m_depthEmptyList.size() << std::endl;
        if (m_depthFilledList.empty() && !m_bIsRecording) return std::pair<float*, uint8*>(NULL, NULL);
        while (m_depthFilledList.empty()) {
            //std::cerr << "waiting for frames" << std::endl;
            sleep(0.01f);
        }
        //std::cout << "[get]: " << m_depthFilledList.size() << ", " << m_depthEmptyList.size() << std::endl;
        const MutexLocker m(m_mutex);
        float* depth = m_depthFilledList.front();
        m_depthFilledList.pop_front();
        if (oldDepth != NULL) m_depthEmptyList.push_back(oldDepth);
        uint8* color = m_colorFilledList.front();
        m_colorFilledList.pop_front();
        if (oldColor != NULL) m_colorEmptyList.push_back(oldColor);
        //std::cout << "[get success]" << std::endl;
        return std::pair<float*, uint8*>(depth, color);
    }
    // Convert an incoming raw frame (uint16 depth in mm, packed RGB color)
    // into a free buffer pair and queue it on the filled lists.
    // Silently drops the frame when recording is off or no buffer is free.
    void receive(uint16* recDepth, uint8* recColor) {
        if (!m_bIsRecording || recColor == NULL || recDepth == NULL) return;
        //std::cout << "[receive]: " << m_depthFilledList.size() << ", " << m_depthEmptyList.size() << std::endl;
        // NOTE(review): this "while" can only execute its body once (it
        // returns) — it behaves as an "if"; same for the color loop below.
        while (m_depthEmptyList.empty()) {
            //std::cout << "list full -- frame lost" << std::endl;
            return;
        }
        float* depth = NULL;
        uint8* color = NULL;
        {
            const MutexLocker m(m_mutex);
            depth = m_depthEmptyList.front();
            m_depthEmptyList.pop_front();
            while (m_colorEmptyList.empty()) {
                // NOTE(review): returning here leaks the popped depth buffer
                // from both lists until the server is torn down — confirm.
                std::cout << "ERROR: color/depth not synced" << std::endl;
                return;
            }
            color = m_colorEmptyList.front();
            m_colorEmptyList.pop_front();
        }
        // depth: millimeters -> meters; invalid readings become -inf
        for (unsigned int i = 0; i < m_depthWidth*m_depthHeight; i++) {
            uint16 v = recDepth[i];
            if (v > 0 && v < shift2depth(0xffff)) {
                depth[i] = (float)v * 0.001f;
            }
            else {
                depth[i] = -std::numeric_limits<float>::infinity();
            }
        }
        // color: expand 3-channel input to m_numColorChannels with opaque
        // alpha (assumes m_numColorChannels == 4 — the [3] write below)
        for (unsigned int y = 0; y < m_colorHeight; y++) {
            for (unsigned int x = 0; x < m_colorWidth; x++) {
                unsigned int idx = y * m_colorWidth + x;
                color[idx*m_numColorChannels + 0] = recColor[idx * 3 + 0];
                color[idx*m_numColorChannels + 1] = recColor[idx * 3 + 1];
                color[idx*m_numColorChannels + 2] = recColor[idx * 3 + 2];
                color[idx*m_numColorChannels + 3] = 255;
            }
        }
        const MutexLocker m(m_mutex);
        m_depthFilledList.push_back(depth);
        m_colorFilledList.push_back(color);
    }
    // Repack an RGBA input into the RGB feedback image sent to the client.
    // NOTE(review): sizeInBytes and bytesPerRow appear to omit the 3-bytes-
    // per-pixel factor for an RGB image (the loop writes 3*w*h bytes) —
    // verify against the uplink::Image definition.
    void updateFeedbackImage(BYTE* data) {
        m_feedbackImage.format = uplink::ImageFormat_RGB;
        m_feedbackImage.width = m_feedbackWidth;
        m_feedbackImage.height = m_feedbackHeight;
        m_feedbackImage.planes[0].buffer = m_feedbackImageData;
        m_feedbackImage.planes[0].sizeInBytes = sizeof(uint8) * m_feedbackWidth * m_feedbackHeight;
        m_feedbackImage.planes[0].bytesPerRow = sizeof(uint8) * m_feedbackWidth;
#pragma omp parallel for
        for (int i = 0; i < m_feedbackImage.width * m_feedbackImage.height; i++) {
            m_feedbackImageData[i * 3 + 0] = data[i * 4 + 0];
            m_feedbackImageData[i * 3 + 1] = data[i * 4 + 1];
            m_feedbackImageData[i * 3 + 2] = data[i * 4 + 2];
        }
    }
    const Image& getFeedbackImage() const { return m_feedbackImage; }
private:
    //DesktopServerUI* _ui;
    // free/ready buffer cycles for depth and color frames
    std::list<float*> m_depthEmptyList;
    std::list<float*> m_depthFilledList;
    std::list<uint8*> m_colorEmptyList;
    std::list<uint8*> m_colorFilledList;
    std::vector<float*> m_depthBuffer; // data buffers
    std::vector<uint8*> m_colorBuffer;
    //std::list<double> m_depthTimestamps;
    //std::list<double> m_colorTimestamps;
    Mutex m_mutex; // guards the four frame lists and m_bHasCalibration
    unsigned int m_depthWidth;
    unsigned int m_depthHeight;
    unsigned int m_colorWidth;
    unsigned int m_colorHeight;
    unsigned int m_numColorChannels;
    uplink::CameraCalibration m_calibrationDepth;
    uplink::CameraCalibration m_calibrationColor;
    bool m_bHasCalibration;
    bool m_bIsRecording;
    bool m_bRecordPressed;
    Image m_feedbackImage; // sent back to app
    uint8* m_feedbackImageData;
    unsigned int m_feedbackWidth, m_feedbackHeight;
};
//------------------------------------------------------------------------------
// Per-connection session; adds a typed accessor that downcasts the base
// class's Server reference to the owning DesktopServer.
struct DesktopServerSession : ServerSession
{
    DesktopServerSession (int socketDescriptor, Server* server)
    : ServerSession(socketDescriptor, server)
    {
    }
    // Safe only because sessions are created with a DesktopServer instance.
    DesktopServer& server () { return *downcast<DesktopServer>(&ServerSession::server()); }
};
//------------------------------------------------------------------------------
} // uplink namespace
# include "./desktop-server.hpp"
|
optimizer.c | /*
*
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
int int2e_sph();
/*
 * Allocate and initialize a CVHFOpt with default settings: no
 * prescreening (CVHFnoscreen / CVHFr_vknoscreen) and a 1e-14 cutoff.
 * On success *opt owns the allocation (release with CVHFdel_optimizer);
 * on allocation failure *opt is set to NULL instead of dereferencing
 * the NULL result (the original used malloc's return unchecked).
 * atm/natm/bas/env are accepted for interface uniformity; only nbas is
 * used here.
 */
void CVHFinit_optimizer(CVHFOpt **opt, int *atm, int natm,
                        int *bas, int nbas, double *env)
{
        CVHFOpt *opt0 = (CVHFOpt *)malloc(sizeof(CVHFOpt));
        if (!opt0) {
                *opt = NULL; /* report OOM through the out-parameter */
                return;
        }
        opt0->nbas = nbas;
        opt0->direct_scf_cutoff = 1e-14;
        opt0->q_cond = NULL;
        opt0->dm_cond = NULL;
        opt0->fprescreen = &CVHFnoscreen;
        opt0->r_vkscreen = &CVHFr_vknoscreen;
        *opt = opt0;
}
/*
 * Free a CVHFOpt created by CVHFinit_optimizer and reset the caller's
 * pointer to NULL.  Safe when *opt is already NULL.
 *
 * Bug fix: the previous code used `if (!opt0->q_cond)` / `if
 * (!opt0->dm_cond)` around the free() calls, i.e. it freed the buffers
 * only when they were NULL (a no-op) and leaked them when they were
 * actually allocated.  free(NULL) is already a no-op, so the buffers
 * are now freed unconditionally.
 */
void CVHFdel_optimizer(CVHFOpt **opt)
{
        CVHFOpt *opt0 = *opt;
        if (!opt0) {
                return;
        }
        free(opt0->q_cond);
        opt0->q_cond = NULL;
        free(opt0->dm_cond);
        opt0->dm_cond = NULL;
        free(opt0);
        *opt = NULL;
}
/* Prescreening stub: accepts every shell quartet (no screening). */
int CVHFnoscreen(int *shls, CVHFOpt *opt,
                 int *atm, int *bas, double *env)
{
        return 1;
}
/* Schwarz screening test: keep the shell quartet (ij|kl) only when the
 * Cauchy-Schwarz bound q_cond[i,j]*q_cond[k,l] exceeds the direct-SCF
 * cutoff.  Returns 1 (keep everything) when no optimizer is attached. */
int CVHFnr_schwarz_cond(int *shls, CVHFOpt *opt,
                        int *atm, int *bas, double *env)
{
        if (opt == NULL) {
                return 1;
        }
        const int ish = shls[0];
        const int jsh = shls[1];
        const int ksh = shls[2];
        const int lsh = shls[3];
        const int nb = opt->nbas;
        assert(opt->q_cond);
        assert(ish < nb);
        assert(jsh < nb);
        assert(ksh < nb);
        assert(lsh < nb);
        const double bound = opt->q_cond[ish*nb+jsh] * opt->q_cond[ksh*nb+lsh];
        return bound > opt->direct_scf_cutoff;
}
/* Prescreening for the 8-fold-symmetric JK build: a quartet survives
 * when its Schwarz bound exceeds the cutoff AND at least one of the six
 * density-matrix blocks it touches is large enough to matter (the J
 * contributions carry the factor 4).  Returns 1 (no screening) when no
 * optimizer is attached. */
int CVHFnrs8_prescreen(int *shls, CVHFOpt *opt,
                       int *atm, int *bas, double *env)
{
        if (opt == NULL) {
                return 1; // no screen
        }
        const int ish = shls[0];
        const int jsh = shls[1];
        const int ksh = shls[2];
        const int lsh = shls[3];
        const int nb = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(ish < nb);
        assert(jsh < nb);
        assert(ksh < nb);
        assert(lsh < nb);
        const double bound = opt->q_cond[ish*nb+jsh] * opt->q_cond[ksh*nb+lsh];
        if (bound <= opt->direct_scf_cutoff) {
                return 0; /* integral itself is negligible */
        }
        /* smallest density element that keeps the contribution above the cutoff */
        const double dmin = opt->direct_scf_cutoff / bound;
        return (4*opt->dm_cond[jsh*nb+ish] > dmin)
            || (4*opt->dm_cond[lsh*nb+ksh] > dmin)
            || (  opt->dm_cond[jsh*nb+ksh] > dmin)
            || (  opt->dm_cond[jsh*nb+lsh] > dmin)
            || (  opt->dm_cond[ish*nb+ksh] > dmin)
            || (  opt->dm_cond[ish*nb+lsh] > dmin);
}
// return flag to decide whether transpose01324
/* Trivial vk-screening hook: marks every density matrix as having no
 * condensed bound (NULL) and a zero floor, and keeps all quartets.
 * The return value is the transpose01324 decision flag (always 1 here). */
int CVHFr_vknoscreen(int *shls, CVHFOpt *opt,
                     double **dms_cond, int n_dm, double *dm_atleast,
                     int *atm, int *bas, double *env)
{
        int idm = 0;
        while (idm < n_dm) {
                dms_cond[idm++] = NULL;
        }
        *dm_atleast = 0;
        return 1;
}
/* Setter for the direct-SCF Schwarz screening threshold. */
void CVHFset_direct_scf_cutoff(CVHFOpt *opt, double cutoff)
{
        opt->direct_scf_cutoff = cutoff;
}
/* Getter for the direct-SCF Schwarz screening threshold. */
double CVHFget_direct_scf_cutoff(CVHFOpt *opt)
{
        return opt->direct_scf_cutoff;
}
/* Precompute the Schwarz bounds q_cond[i,j] = sqrt(max |(ij|ij)|) over
 * the functions of shell pair (i,j), used by the prescreening tests
 * above.  The symmetric lower triangle is computed in parallel and
 * mirrored into the full nbas x nbas matrix. */
void CVHFsetnr_direct_scf(CVHFOpt *opt, int *atm, int natm,
                          int *bas, int nbas, double *env)
{
        /* This memory is released in void CVHFdel_optimizer, Don't know
         * why valgrind raises memory leak here */
        if (opt->q_cond) {
                free(opt->q_cond);
        }
        /* NOTE(review): this malloc and the two per-thread mallocs below
         * are used unchecked — an OOM here crashes on first store. */
        opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
        int shls_slice[] = {0, nbas};
        const int cache_size = GTOmax_cache_size(&int2e_sph, shls_slice, 1,
                                                 atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
        shared(opt, atm, natm, bas, nbas, env)
{
        double qtmp, tmp;
        int ij, i, j, di, dj, ish, jsh;
        int shls[4];
        /* per-thread scratch for the integral engine */
        double *cache = malloc(sizeof(double) * cache_size);
        /* di becomes the largest shell dimension, sizing buf for any quartet */
        di = 0;
        for (ish = 0; ish < nbas; ish++) {
                dj = CINTcgto_spheric(ish, bas);
                di = MAX(di, dj);
        }
        double *buf = malloc(sizeof(double) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
                /* decode the flattened lower-triangle index ij -> (ish, jsh) */
                ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                jsh = ij - ish*(ish+1)/2;
                di = CINTcgto_spheric(ish, bas);
                dj = CINTcgto_spheric(jsh, bas);
                /* diagonal-type quartet (ij|ij) for the Schwarz inequality */
                shls[0] = ish;
                shls[1] = jsh;
                shls[2] = ish;
                shls[3] = jsh;
                qtmp = 1e-100; /* floor keeps later divisions by q_cond safe */
                if (0 != int2e_sph(buf, NULL, shls, atm, natm, bas, nbas, env, NULL, cache)) {
                        for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                /* element (i,j,i,j) of the (ij|ij) block */
                                tmp = fabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
                                qtmp = MAX(qtmp, tmp);
                        } }
                        qtmp = sqrt(qtmp);
                }
                opt->q_cond[ish*nbas+jsh] = qtmp;
                opt->q_cond[jsh*nbas+ish] = qtmp;
        }
        free(buf);
        free(cache);
}
}
/* Build opt->dm_cond[ish,jsh]: the largest |dm| element within the AO
 * block of shell pair (ish,jsh), maximized over all nset density
 * matrices.  ao_loc[sh] gives the first AO index of shell sh. */
void CVHFsetnr_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
                             int *atm, int natm, int *bas, int nbas, double *env)
{
        /* always reallocate: nset may differ between calls, so the old
         * buffer cannot be reused blindly */
        if (opt->dm_cond) {
                free(opt->dm_cond);
        }
        opt->dm_cond = (double *)malloc(sizeof(double) * nbas*nbas);
        memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas);
        const int nao = ao_loc[nbas];
        int ish, jsh, iset, i, j;
        for (ish = 0; ish < nbas; ish++) {
        for (jsh = 0; jsh < nbas; jsh++) {
                double vmax = 0;
                for (iset = 0; iset < nset; iset++) {
                        const double *pdm = dm + nao*nao*iset;
                        for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
                        for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
                                const double v = fabs(pdm[i*nao+j]);
                                if (v > vmax) {
                                        vmax = v;
                                }
                        } }
                }
                opt->dm_cond[ish*nbas+jsh] = vmax;
        } }
}
/*
*************************************************
*/
/* Convenience constructor: allocate an optimizer, install the s8
 * prescreening test, and precompute the Schwarz bounds q_cond. */
void CVHFnr_optimizer(CVHFOpt **vhfopt, int *atm, int natm,
                      int *bas, int nbas, double *env)
{
        CVHFinit_optimizer(vhfopt, atm, natm, bas, nbas, env);
        CVHFOpt *opt = *vhfopt;
        opt->fprescreen = &CVHFnrs8_prescreen;
        CVHFsetnr_direct_scf(opt, atm, natm, bas, nbas, env);
}
|
nested_serial.c | // RUN: %libomp-compile && env OMP_DISPLAY_AFFINITY=true %libomp-run | %python %S/check.py -c 'CHECK' %s
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc, char** argv) {
  /* Every parallel region below runs with exactly one thread; with
     OMP_DISPLAY_AFFINITY=true the runtime emits one affinity line per
     region whose tl:%L field encodes the nesting level, which the
     CHECK lines at the bottom of the file verify in order. */
  omp_set_affinity_format("TESTER: tl:%L at:%a tn:%n nt:%N");
  omp_set_nested(1); /* enable nested parallelism (legacy API) */
  #pragma omp parallel num_threads(1)
  {
    /* level-2 regions inside a level-1 region */
    #pragma omp parallel num_threads(1)
    { }
    #pragma omp parallel num_threads(1)
    { }
    #pragma omp parallel num_threads(1)
    {
      /* level-3 region */
      #pragma omp parallel num_threads(1)
      { }
    }
    #pragma omp parallel num_threads(1)
    { }
  }
  /* back at top level */
  #pragma omp parallel num_threads(1)
  { }
  #pragma omp parallel num_threads(1)
  { }
  return 0;
}
// CHECK: num_threads=1 TESTER: tl:1 at:0 tn:0 nt:1
// CHECK: num_threads=1 TESTER: tl:2 at:0 tn:0 nt:1
// CHECK: num_threads=1 TESTER: tl:3 at:0 tn:0 nt:1
// CHECK: num_threads=1 TESTER: tl:2 at:0 tn:0 nt:1
// CHECK: num_threads=1 TESTER: tl:1 at:0 tn:0 nt:1
|
bug_nested_proxy_task.c | // RUN: %libomp-compile-and-run
// REQUIRES: openmp-4.5
// The runtime currently does not get dependency information from GCC.
// UNSUPPORTED: gcc
#include <omp.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include "omp_my_sleep.h"
/*
With task dependencies one can generate proxy tasks from an explicit task
being executed by a serial task team. The OpenMP runtime library didn't
expect that and tries to free the explicit task that is the parent of the
proxy task still working in background. It therefore has incomplete children
which triggers a debugging assertion.
*/
// Compiler-generated code (emulation)
/* Minimal re-declarations of LLVM OpenMP runtime (kmp) internals so this
   test can call the entry points directly without the runtime headers.
   These layouts must stay in sync with the runtime's own definitions. */
typedef long kmp_intptr_t;
typedef int kmp_int32;
typedef char bool; /* pre-C23 stand-in matching the runtime's ABI */
typedef struct ident {
    kmp_int32 reserved_1;   /**< might be used in Fortran; see above */
    kmp_int32 flags;        /**< also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC identifies this union member */
    kmp_int32 reserved_2;   /**< not really used in Fortran any more; see above */
#if USE_ITT_BUILD
/*  but currently used for storing region-specific ITT */
/*  contextual information. */
#endif /* USE_ITT_BUILD */
    kmp_int32 reserved_3;   /**< source[4] in Fortran, do not use for C++  */
    char const *psource;    /**< String describing the source location.
                            The string is composed of semi-colon separated fields which describe the source file,
                            the function and a pair of line numbers that delimit the construct.
                             */
} ident_t;
/* One entry of a task's depend() list: address range plus in/out flags. */
typedef struct kmp_depend_info {
     kmp_intptr_t base_addr;
     size_t len;
     struct {
         bool in:1;
         bool out:1;
     } flags;
} kmp_depend_info_t;
struct kmp_task;
/* Signature of the outlined task body invoked by the runtime. */
typedef kmp_int32 (* kmp_routine_entry_t)( kmp_int32, struct kmp_task * );
typedef struct kmp_task { /* GEH: Shouldn't this be aligned somehow? */
    void * shareds;            /**< pointer to block of pointers to shared vars   */
    kmp_routine_entry_t routine;  /**< pointer to routine to call for executing task */
    kmp_int32 part_id;         /**< part id for the task                          */
} kmp_task_t;
#ifdef __cplusplus
extern "C" {
#endif
kmp_int32 __kmpc_global_thread_num ( ident_t * );
kmp_task_t*
__kmpc_omp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags,
size_t sizeof_kmp_task_t, size_t sizeof_shareds,
kmp_routine_entry_t task_entry );
void __kmpc_proxy_task_completed_ooo ( kmp_task_t *ptask );
kmp_int32 __kmpc_omp_task_with_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task,
kmp_int32 ndeps, kmp_depend_info_t *dep_list,
kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list );
kmp_int32
__kmpc_omp_task( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task );
#ifdef __cplusplus
}
#endif
void *target(void *task)
{
my_sleep( 0.1 );
__kmpc_proxy_task_completed_ooo((kmp_task_t*) task);
return NULL;
}
pthread_t target_thread;
// User's code
/* Task entry invoked by the OpenMP runtime: hand the proxy task to a
 * background pthread, which completes it after a short sleep.
 * Fix: the pthread_create result was ignored; if thread creation
 * failed the proxy task would never be completed and the enclosing
 * taskgroup would hang forever.  Now the test aborts loudly instead. */
int task_entry(kmp_int32 gtid, kmp_task_t *task)
{
  int rc = pthread_create(&target_thread, NULL, &target, task);
  if (rc != 0) {
    fprintf(stderr, "pthread_create failed: %d\n", rc);
    exit(EXIT_FAILURE); /* cannot recover: the proxy would never complete */
  }
  return 0;
}
/* Build, by hand, the task graph a compiler would emit for a nested
   "target nowait" pattern, and make sure the runtime does not crash
   while the outer proxy task still has an incomplete proxy child. */
int main()
{
  int dep;
#pragma omp taskgroup
  {
  /*
   * Corresponds to:
   #pragma omp target nowait depend(out: dep)
   {
      my_sleep( 0.1 );
   }
  */
  kmp_depend_info_t dep_info;
  dep_info.base_addr = (long) &dep;
  dep_info.len = sizeof(int);
  // out = inout per spec and runtime expects this
  dep_info.flags.in = 1;
  dep_info.flags.out = 1;
  kmp_int32 gtid = __kmpc_global_thread_num(NULL);
  /* flags=17: proxy/untied task (runtime-internal encoding) —
     TODO confirm against kmp_tasking_flags_t */
  kmp_task_t *proxy_task = __kmpc_omp_task_alloc(NULL,gtid,17,sizeof(kmp_task_t),0,&task_entry);
  __kmpc_omp_task_with_deps(NULL,gtid,proxy_task,1,&dep_info,0,NULL);
  /* this dependent task becomes the parent of a second proxy task */
  #pragma omp task depend(in: dep)
  {
    /*
     * Corresponds to:
     #pragma omp target nowait
     {
        my_sleep( 0.1 );
     }
    */
    kmp_task_t *nested_proxy_task = __kmpc_omp_task_alloc(NULL,gtid,17,sizeof(kmp_task_t),0,&task_entry);
    __kmpc_omp_task(NULL,gtid,nested_proxy_task);
  }
  }
  // only check that it didn't crash
  return 0;
}
|
DRB101-task-value-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Cover an implicitly determined rule: In a task generating construct,
* a variable without applicable rules is firstprivate.
* */
#include <stdio.h>
/* Problem size shared by both global arrays in this microbenchmark. */
#define MYLEN 100

/* Global data: a[] is filled by the parallel loops in main(); b[] records
 * any mismatching entries found by the checking loop. */
int a[MYLEN];
int b[MYLEN];

/* Store i+1 into slot i of the shared array a. */
void gen_task(int i) {
  a[i] = i + 1;
}
/*
 * Driver: initialize a[] in parallel via gen_task(), verify every entry in
 * a second parallel loop (mismatches are copied into b[]), then print both
 * arrays sequentially.
 */
int main()
{
  int i = 0;

#pragma omp parallel for private(i)
  for (i = 0; i < MYLEN; i++)
  {
    gen_task(i);
  }

  /* checking control flow */
#pragma omp parallel for private(i)
  for (i = 0; i < MYLEN; i++)
  {
    if (a[i] != i + 1)
    {
      b[i] = a[i];
    }
  }

  for (i = 0; i < MYLEN; i++)
  {
    printf("%d %d\n", a[i], b[i]);
  }
  return 0;
}
|
associate_ptr.c | // RUN: %libomptarget-compile-run-and-check-generic
// REQUIRES: unified_shared_memory
// UNSUPPORTED: clang-6, clang-7, clang-8, clang-9
#include <assert.h>
#include <omp.h>
#include <stdio.h>
#pragma omp requires unified_shared_memory
int main(int argc, char *argv[]) {
  int dev = omp_get_default_device();
  int x = 10;
  // Device-side buffer that will back the host variable x.
  int *x_dev = (int *)omp_target_alloc(sizeof x, dev);
  assert(x_dev && "expected omp_target_alloc to succeed");
  // Associate the host address of x with the device allocation, so the
  // runtime considers x "present" on the device from now on.
  int rc = omp_target_associate_ptr(&x, x_dev, sizeof x, 0, dev);
  assert(!rc && "expected omp_target_associate_ptr to succeed");
  // To determine whether x needs to be transferred, the runtime cannot simply
  // check whether unified shared memory is enabled and the 'close' modifier is
  // specified. It must check whether x was previously placed in device memory
  // by, for example, omp_target_associate_ptr.
  #pragma omp target map(always, tofrom: x)
  x += 1;
  // CHECK: x=11
  printf("x=%d\n", x);
  // CHECK: present: 1
  printf("present: %d\n", omp_target_is_present(&x, dev));
  return 0;
}
|
GB_unop__sqrt_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sqrt_fc64_fc64)
// op(A') function: GB (_unop_tran__sqrt_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = csqrt (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = csqrt (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = csqrt (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SQRT || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__sqrt_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = csqrt (Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = csqrt (Ax [p]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__sqrt_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // one workspace per thread
    const int64_t *restrict A_slice,    // partition of A across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Shared transpose template: applies GB_CAST_OP (here, csqrt) to each
    // entry of A while writing the transposed pattern into C.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 *
 * NOTE: y is normalized in place while borrowing/carrying between the
 * seconds and microseconds fields (this matches the classic glibc manual
 * example this routine is taken from).
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow from the seconds field when x has fewer microseconds than y. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry into the seconds field when the microsecond gap exceeds 1s. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec of the result is now guaranteed to be non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-1 3D 7-point variable-coefficient stencil.
 * Allocates the two time-level array A and the 7 coefficient arrays,
 * initializes them with pseudo-random data, runs the PLUTO/CLooG-tiled
 * sweep TESTS times, reports the best wall-clock time, and frees all
 * allocations.
 *
 * Fixes vs. the original: Nx/Ny/Nz/Nt now have defaults (the original
 * read them uninitialized when too few command-line arguments were
 * given, which is undefined behavior), and the top-level A, coef and
 * tile_size allocations are now freed (they were leaked).
 */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  /* Defaults: 32^3 inner points plus 2 halo layers per axis, 10 steps;
   * overridden by argv below. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A holds two time levels, coef the 7 stencil weights
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 4;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  // initialize variables with reproducible pseudo-random data
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // Time-skewed, tiled stencil sweep generated by PLUTO/CLooG.
    // Addition: 6 && Multiplication: 2 per point.
    int t1, t2, t3, t4, t5, t6, t7, t8;
    register int lbv, ubv;
    int lbp, ubp;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,16);t1++) {
        lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
        ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(32*t2-Nz,4)),4*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(16*t1+Ny+29,4)),floord(32*t2+Ny+28,4)),floord(32*t1-32*t2+Nz+Ny+27,4));t3++) {
            for (t4=max(max(max(0,ceild(t1-63,64)),ceild(32*t2-Nz-1020,1024)),ceild(4*t3-Ny-1020,1024));t4<=min(min(min(min(floord(4*t3+Nx,1024),floord(Nt+Nx-4,1024)),floord(16*t1+Nx+29,1024)),floord(32*t2+Nx+28,1024)),floord(32*t1-32*t2+Nz+Nx+27,1024));t4++) {
              for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),4*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),4*t3+2),1024*t4+1022),32*t1-32*t2+Nz+29);t5++) {
                for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                    lbv=max(1024*t4,t5+1);
                    ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays: inner rows first, then the spines, then the
  // top-level pointer arrays (which the original version leaked).
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  (void) ts_return;
  (void) num_threads;
  return 0;
}
|
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file utils.h
* \brief Basic utilility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/imperative.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/storage.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <sstream>
#include <string>
#include <thread>
#include <unordered_set>
#include <algorithm>
#include <functional>
#include <limits>
#include "../operator/mxnet_op.h"
#if MXNET_USE_ONEDNN == 1
#include "../operator/nn/dnnl/dnnl_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
/*! \brief Return the OS process id of the current process (Windows). */
inline size_t current_process_id() {
  return ::GetCurrentProcessId();
}
#else
/*! \brief Return the OS process id of the current process (POSIX). */
inline size_t current_process_id() {
  return getpid();
}
#endif
/*!
 * \brief Kernel functor validating one CSR indptr interval per invocation.
 * IndPtr should be non-negative, in non-decreasing order, start with 0
 * and end with value equal with size of indices.
 * On any violation, writes kCSRIndPtrErr into *out.
 */
struct csr_indptr_check {
  template <typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i,
                                  DType* out,           // device-side error flag
                                  const IType* indptr,  // indptr array, length end+1
                                  const nnvm::dim_t end,       // number of row intervals
                                  const nnvm::dim_t idx_size) {  // expected total nnz
    // Checks: negative entry, decreasing pair, first value != 0,
    // last value != number of stored indices.
    if (indptr[i + 1] < 0 || indptr[i + 1] < indptr[i] || (i == 0 && indptr[i] != 0) ||
        (i == end - 1 && indptr[end] != idx_size))
      *out = kCSRIndPtrErr;
  }
};
/*!
 * \brief Kernel functor validating the column indices of one CSR row.
 * Indices should be non-negative, less than the number of columns
 * and in ascending order per row.
 * On any violation, writes kCSRIdxErr into *out.
 */
struct csr_idx_check {
  template <typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i,
                                  DType* out,           // device-side error flag
                                  const IType* idx,     // column indices
                                  const RType* indptr,  // row pointers
                                  const nnvm::dim_t ncols) {
    // Walk row i's index range [indptr[i], indptr[i+1]); stop at first error.
    for (RType j = indptr[i]; j < indptr[i + 1]; j++) {
      if (idx[j] >= ncols || idx[j] < 0 || (j < indptr[i + 1] - 1 && idx[j] >= idx[j + 1])) {
        *out = kCSRIdxErr;
        break;
      }
    }
  }
};
/*!
 * \brief Kernel functor validating row indices of a row-sparse NDArray.
 * Indices of RSPNDArray should be non-negative,
 * less than the size of first dimension and in ascending order.
 * On any violation, writes kRSPIdxErr into *out.
 * \note `end` is the last valid position (count-1), so idx[i+1] is only
 *       read while i < end.
 */
struct rsp_idx_check {
  template <typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i,
                                  DType* out,        // device-side error flag
                                  const IType* idx,  // row indices
                                  const nnvm::dim_t end,     // last index position
                                  const nnvm::dim_t nrows) {  // rows in the dense shape
    if ((i < end && idx[i + 1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows)
      *out = kRSPIdxErr;
  }
};
template <typename xpu>
void CheckFormatWrapper(const RunContext& rctx,
const NDArray& input,
const TBlob& err_cpu,
const bool full_check);
/*!
* \brief Check the validity of CSRNDArray.
* \param rctx Execution context.
* \param input Input NDArray of CSRStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template <typename xpu>
void CheckFormatCSRImpl(const RunContext& rctx,
                        const NDArray& input,
                        const TBlob& err_cpu,
                        const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage) << "CheckFormatCSRImpl is for CSRNDArray";
  const mxnet::TShape shape = input.shape();
  const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
  const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr);
  const mxnet::TShape storage_shape = input.storage_shape();
  // O(1) structural check: CSR must be 2-D with 1-D aux/data arrays,
  // indptr of length rows+1, and one column index per stored value.
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) || (idx_shape[0] != storage_shape[0])) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kCSRShapeErr;
    });
    return;
  }
  if (full_check) {
    // O(N) content check: run kernels on the array's device, accumulate an
    // error flag in device memory, then copy it back into err_cpu.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu>* s = rctx.get_stream<xpu>();
          NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu = ret_xpu.data();
          // Initialize the device-side flag to "no error".
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          Kernel<csr_indptr_check, xpu>::Launch(s,
                                                indptr_shape[0] - 1,
                                                val_xpu.dptr<DType>(),
                                                input.aux_data(csr::kIndPtr).dptr<RType>(),
                                                indptr_shape[0] - 1,
                                                idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            Kernel<csr_idx_check, xpu>::Launch(s,
                                               indptr_shape[0] - 1,
                                               val_xpu.dptr<DType>(),
                                               input.aux_data(csr::kIdx).dptr<IType>(),
                                               input.aux_data(csr::kIndPtr).dptr<RType>(),
                                               shape[1]);
          }
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}
/*!
* \brief Check the validity of RowSparseNDArray.
* \param rctx Execution context.
* \param input Input NDArray of RowSparseStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template <typename xpu>
void CheckFormatRSPImpl(const RunContext& rctx,
                        const NDArray& input,
                        const TBlob& err_cpu,
                        const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kRowSparseStorage) << "CheckFormatRSPImpl is for RSPNDArray";
  const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
  // O(1) check: exactly one row index per stored row.
  if (idx_shape[0] != input.storage_shape()[0]) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kRSPShapeErr;
    });
    return;
  }
  // Nothing stored: trivially valid.
  if (idx_shape[0] == 0) {
    return;
  }
  if (full_check) {
    // O(N) index check on the array's device; the flag is copied back
    // into err_cpu afterwards.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
        mshadow::Stream<xpu>* s = rctx.get_stream<xpu>();
        NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_);
        TBlob val_xpu = ret_xpu.data();
        // Initialize the device-side flag to "no error".
        Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
        Kernel<rsp_idx_check, xpu>::Launch(s,
                                           idx_shape[0],
                                           val_xpu.dptr<DType>(),
                                           input.aux_data(rowsparse::kIdx).dptr<IType>(),
                                           idx_shape[0] - 1,
                                           input.shape()[0]);
        mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s);
      });
    });
  }
}
/*!
 * \brief Dispatch format validation according to the storage type of
 * `input`. Dense arrays need no validation; an unknown storage type is
 * a fatal error.
 */
template <typename xpu>
void CheckFormatImpl(const RunContext& rctx,
                     const NDArray& input,
                     const TBlob& err_cpu,
                     const bool full_check) {
  const int stype = input.storage_type();
  switch (stype) {
    case kCSRStorage:
      CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kRowSparseStorage:
      CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kDefaultStorage:
      // no-op for default storage
      break;
    default:
      LOG(FATAL) << "Unknown storage type " << stype;
  }
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template <typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu>* s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/* \brief Casts tensor storage type to the new type.
*/
template <typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype) {
if (!vstorage.empty()) {
for (const auto& i : vstorage) {
if (i != stype)
return false;
}
return true;
}
return false;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
* or `stype2'. Sets boolean if both found.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool* has_both) {
if (has_both) {
*has_both = false;
}
if (!vstorage.empty()) {
uint8_t has = 0;
for (const auto i : vstorage) {
if (i == stype1) {
has |= 1;
} else if (i == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() != stype) {
return false;
}
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool* has_both) {
if (has_both) {
*has_both = false;
}
if (!ndarrays.empty()) {
uint8_t has = 0;
for (const auto& nd : ndarrays) {
const NDArrayStorageType stype = nd.storage_type();
if (stype == stype1) {
has |= 1;
} else if (stype == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if storage type of any array in `ndarrays`
* is the same as the target `stype`. false is returned for empty inputs.
*/
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() == stype) {
return true;
}
}
}
return false;
}
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
* is the same as the target `stype`. false is returned for empty inputs.
*/
inline bool ContainsStorageType(const std::vector<int>& ndstypes, const NDArrayStorageType stype) {
if (!ndstypes.empty()) {
for (const auto& ndstype : ndstypes) {
if (ndstype == stype) {
return true;
}
}
}
return false;
}
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
  if (x == DispatchMode::kFCompute)
    return "fcompute";
  if (x == DispatchMode::kFComputeEx)
    return "fcompute_ex";
  if (x == DispatchMode::kFComputeFallback)
    return "fcompute_fallback";
  if (x == DispatchMode::kVariable)
    return "variable";
  if (x == DispatchMode::kUndefined)
    return "undefined";
  return "unknown";
}
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
  if (x == kDefaultStorage)
    return "default";
  if (x == kCSRStorage)
    return "csr";
  if (x == kRowSparseStorage)
    return "row_sparse";
  return "unknown";
}
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
  if (dev_type == Context::kCPU)
    return "cpu";
  if (dev_type == Context::kGPU)
    return "gpu";
  if (dev_type == Context::kCPUPinned)
    return "cpu_pinned";
  if (dev_type == Context::kCPUShared)
    return "cpu_shared";
  return "unknown";
}
/*!
 * \brief Look up `attr_name` in the node's attribute dictionary.
 * \return the stored value, or `default_val` when the attribute is absent.
 */
inline std::string attr_value_string(const nnvm::NodeAttrs& attrs,
                                     const std::string& attr_name,
                                     std::string default_val = "") {
  // Single find() instead of find()+at(): the original traversed the map twice.
  const auto it = attrs.dict.find(attr_name);
  return it == attrs.dict.end() ? default_val : it->second;
}
/*! \brief Seeks an attribute in a node and its subgraphs and invokes a function on each. */
template <typename Fn>
inline void attr_foreach(const nnvm::NodeAttrs& attrs, const std::string& attr_name, const Fn& fn) {
  // Visit the node's own attribute first (if present)...
  const auto& found_it = attrs.dict.find(attr_name);
  if (found_it != attrs.dict.end()) {
    fn(found_it->second);
  }
  // ...then recurse depth-first into every subgraph this node carries.
  for (const auto& subgraph : attrs.subgraphs) {
    DFSVisit(subgraph->outputs,
             [&](const nnvm::ObjectPtr& node) { attr_foreach(node->attrs, attr_name, fn); });
  }
}
/*!
 * \brief Bitwise-OR an integer-valued attribute `attr_name` collected from
 * a node and all of its subgraphs (via attr_foreach).
 * Values that fail to parse as integers trigger a warning.
 */
template <typename ValueType>
inline ValueType flag_attr_accumulate(const nnvm::NodeAttrs& attrs, const std::string& attr_name) {
  static_assert(std::is_integral<ValueType>::value, "ValueType must be an integral type.");
  ValueType result = 0;
  attr_foreach(attrs, attr_name, [&](const std::string& attr_value) {
    // Parse the attribute text as an integer (std::istringstream, <sstream>).
    std::istringstream ss(attr_value);
    ValueType temp;
    ss >> temp;
    // On extraction failure temp is value-initialized (0), so the OR is a no-op.
    result |= temp;
    if (ss.fail() || !ss.eof()) {
      LOG(WARNING) << "Incorrect value of an attribute: " << attr_name
                   << ". Expected an integer, while got: " << attr_value;
    }
  });
  return result;
}
/*! \brief get string representation of the operator stypes */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         const std::vector<int>& in_attrs,
                                         const std::vector<int>& out_attrs) {
  std::ostringstream os;
  // Operator name and per-argument storage types.
  os << "operator = " << attrs.op->name << "\ninput storage types = [";
  for (const int attr : in_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "output storage types = [";
  for (const int attr : out_attrs) {
    os << stype_string(attr) << ", ";
  }
  // Operator parameters and the execution device class.
  os << "]\n"
     << "params = {";
  for (auto kv : attrs.dict) {
    os << "\"" << kv.first << "\" : " << kv.second << ", ";
  }
  os << "}\n"
     << "context.dev_mask = " << dev_type_string(dev_mask);
  return os.str();
}
/*! \brief get string representation of the operator, its storage types,
 * parameters and execution context (req is part of the signature but is
 * not reflected in the string). */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  // Take each NDArray by const reference: the original lambda copied the
  // array by value for every element, causing needless refcount churn.
  auto xform = [](const NDArray& arr) -> int { return arr.storage_type(); };
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
  return operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
}
/*! \brief log message once. Intended for storage fallback warning messages. */
inline void LogOnce(const std::string& message) {
  using LogStore = dmlc::ThreadLocalStore<std::unordered_set<std::string>>;
  auto seen = LogStore::Get();
  // insert().second is true only the first time this exact message is recorded.
  if (seen->insert(message).second) {
    LOG(INFO) << message;
  }
}
/*! \brief log storage fallback event
 * Emits (at most once per unique operator description) a warning that the
 * operator fell back to the dense implementation. Controlled by the
 * MXNET_STORAGE_FALLBACK_LOG_VERBOSE environment variable.
 */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
                               const int dev_mask,
                               const std::vector<int>* in_attrs,
                               const std::vector<int>* out_attrs) {
  // Read the env var once; logging can be suppressed globally.
  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
  if (!log)
    return;
  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
  std::ostringstream os;
  // The warning text is assembled at compile time from #if fragments.
  const char* warning =
      "\n WARNING:\n"
      "Execution of the operator above will fallback to the generic implementation "
#if MXNET_USE_ONEDNN == 1
      "(not utilizing kernels from oneDNN library) "
#endif
      "with default dense storage type. You are seeing this warning message because "
#if MXNET_USE_ONEDNN == 1
      "MXNET_ONEDNN_ENABLED flag is set to 0, in which case you can re-enable the default "
      "execution path by setting MXNET_ONEDNN_ENABLED back to 1, or "
#endif
      "the operator above is unable to process the given ndarrays with specified storage types, "
      "context and/or parameter, in which case temporary dense ndarrays are generated in order to "
      "execute the operator. The fallback does not affect the correctness of the programme. Using "
      "default storage type performance degradation might be observed. \nYou can set environment "
      "variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to 0 to suppress this warning.";
  os << "\nStorage type fallback detected:\n" << op_str << warning;
  // Deduplicated per unique operator description.
  LogOnce(os.str());
#if MXNET_USE_ONEDNN == 1
  if (GetDNNLCacheSize() != -1)
    common::LogOnce(
        "MXNET_ONEDNN_CACHE_NUM is set."
        "Should only be set if "
        "your model has variable input shapes, "
        "as cache size may grow unbounded");
#endif
}
// heuristic to determine the number of worker threads per GPU
inline int GetNumThreadsPerGPU() {
  // Resource-efficient default of two workers; overridable via env var.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // Never exceed the per-GPU worker-thread budget.
  const int requested = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  return std::min(requested, GetNumThreadsPerGPU());
}
/*! \brief Sum the n elements of `a` onto `start`, parallelized with an
 * OpenMP reduction; returns the accumulated total. */
template <typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V total = start;
#pragma omp parallel for reduction(+ : total)
  for (int idx = 0; idx < n; ++idx) {
    total += a[idx];
  }
  return total;
}
/*!
 * \brief
 * Recursive helper for ParallelSort; do not call directly.
 * Sorts [first, first+len) with comp: ranges shorter than grainsize are
 * sorted serially, longer ranges spawn a thread for the left half, sort
 * the right half on the current thread, and merge the two sorted halves.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template <typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len, size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    std::sort(first, first + len, comp);
    return;
  }
  const size_t half = len / 2;
  std::thread left(ParallelSortHelper<RandomIt, Compare>, first, half, grainsize, comp);
  ParallelSortHelper(first + half, len - half, grainsize, comp);
  left.join();
  std::inplace_merge(first, first + half, first + len, comp);
}
/*!
 * \brief
 * Sort [first, last) into the ascending order defined by comp, recursively
 * splitting ranges longer than the grain size across up to num_threads
 * threads.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template <typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const auto len = std::distance(first, last);
  const size_t grain = std::max(len / num_threads + 5, static_cast<size_t>(1024 * 16));
  ParallelSortHelper(first, len, grain, comp);
}
/*!
 * \brief
 * Sort [first, last) into ascending order using the default < operator,
 * delegating to the comparator overload above.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template <typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  using value_type = typename std::iterator_traits<RandomIt>::value_type;
  ParallelSort(first, last, num_threads, std::less<value_type>());
}
/*!
 * \brief Random Engine
 * Mersenne Twister is the project-wide default PRNG.
 */
typedef std::mt19937 RANDOM_ENGINE;
/*!
 * \brief Helper functions.
 */
namespace helper {
/*!
 * \brief Helper for non-array type `T`.
 * Only this primary template exposes `SingleObject`, so MakeUnique(args...)
 * participates in overload resolution solely for non-array types.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief Type of `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};
/*!
 * \brief Helper for an array of unknown bound `T`.
 * Only this specialization exposes `UnknownBound`, enabling MakeUnique(n).
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief Type of `T`.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};
/*!
 * \brief Helper for an array of known bound `T`.
 * `KnownBound` is void so the corresponding MakeUnique overload can be deleted.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief Type of `T`.
   */
  using KnownBound = void;
};
}  // namespace helper
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs a non-array type `T`. The arguments `args` are passed to the
* constructor of `T`. The function does not participate in the overload
* resolution if `T` is an array type.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  // Perfect-forward the arguments into a single heap-allocated T and
  // hand ownership to a unique_ptr.
  T* raw = new T(std::forward<Args>(args)...);
  return std::unique_ptr<T>(raw);
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param n The size of the array to construct.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs an array of unknown bound `T`. The function does not participate
* in the overload resolution unless `T` is an array of unknown bound.
*/
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  // T is U[]; allocate n value-initialized (zeroed for scalars) elements
  // of the element type U.
  using Elem = typename std::remove_extent<T>::type;
  return std::unique_ptr<T>(new Elem[n]{});
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
*
* Constructs an arrays of known bound is disallowed.
*/
// Constructing an array of known bound (e.g. MakeUnique<int[5]>()) is
// intentionally disallowed; use the unknown-bound overload MakeUnique<U[]>(n).
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
template <typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name, const Context& ctx) {
  // Look up the compute function attribute `name` registered for `op`,
  // choosing the "<cpu>" or "<gpu>" variant from the context's device mask.
  // Returns nullptr when the op has no such attribute registered
  // (the default passed to get()).
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
    return nullptr;
  }
}
/*!
 * \brief Return the max integer value representable in the type `T` without
 * loss of precision: numeric_limits<T>::max() for integral types, and
 * 2^digits (i.e. 2 << (digits - 1)) for floating-point types, which is the
 * upper end of the contiguous exactly-representable integer range.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  return std::is_integral<T>::value ? std::numeric_limits<T>::max() :
  size_t(2) << (std::numeric_limits<T>::digits - 1);
}
// fp16 has an 11-bit significand, so integers up to 2 << 10 == 2^11 == 2048
// are exactly representable.
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;
}
// NOTE(review): bfloat16 has an 8-bit significand, so the largest exactly
// representable contiguous integer would be 2^8 == 256, yet this returns
// 2 << 14 == 32768 -- confirm whether this constant is intentional.
template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
  return size_t(2) << 14;
}
MSHADOW_XINLINE int ilog2ul(size_t a) {
  // 1-based position of the highest set bit (returns 1 for a == 0 or 1).
  int bit = 1;
  for (a >>= 1; a != 0; a >>= 1) {
    ++bit;
  }
  return bit;
}
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  // 1-based position of the highest set bit (returns 1 for a == 0 or 1).
  int bit = 1;
  for (a >>= 1; a != 0; a >>= 1) {
    ++bit;
  }
  return bit;
}
/*!
 * \brief Return an NDArray of all zeros.
 * \param stype storage type of the result
 * \param shape shape of the result
 * \param ctx context (device) on which the array lives
 * \param dtype element type
 */
inline NDArray InitZeros(const NDArrayStorageType stype,
                         const mxnet::TShape& shape,
                         const Context& ctx,
                         const int dtype) {
  // NDArray with default storage: allocate now and fill with zeros.
  if (stype == kDefaultStorage) {
    NDArray ret(shape, ctx, false, dtype);
    ret = 0;
    return ret;
  }
  // NDArray with non-default storage. Storage allocation is always delayed.
  return NDArray(stype, shape, ctx, true, dtype);
}
/*!
 * \brief Append an all-zero NDArray to a std::vector in place.
 * \param stype storage type of the new array
 * \param shape shape of the new array
 * \param ctx context (device) on which the array lives
 * \param dtype element type
 * \param vec vector receiving the new array
 */
inline void EmplaceBackZeros(const NDArrayStorageType stype,
                             const mxnet::TShape& shape,
                             const Context& ctx,
                             const int dtype,
                             std::vector<NDArray>* vec) {
  // NDArray with default storage: construct in place, then zero-fill.
  if (stype == kDefaultStorage) {
    vec->emplace_back(shape, ctx, false, dtype);
    vec->back() = 0;
  } else {
    // NDArray with non-default storage. Storage allocation is always delayed.
    vec->emplace_back(stype, shape, ctx, true, dtype);
  }
}
/*!
 * \brief Copy `size` elements from `src` to `dst`, parallelized with OpenMP
 * once `size` reaches the MXNET_CPU_PARALLEL_SIZE threshold (default 200000);
 * below the threshold a single memcpy is used, so the regions must not
 * overlap.
 */
template <typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  // Threshold is read from the environment once (static) and reused.
  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] = src[i];
    }
  } else {
    // memcpy on a non-trivially-copyable DType triggers -Wclass-memaccess on
    // GCC >= 8; deliberately suppressed here.
#pragma GCC diagnostic push
#if __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
    std::memcpy(dst, src, sizeof(DType) * size);
#pragma GCC diagnostic pop
  }
}
/*!
 * \brief Element-wise `dst[i] += src[i]` over `size` elements, parallelized
 * with OpenMP once `size` reaches the MXNET_CPU_PARALLEL_SIZE threshold
 * (default 200000); otherwise a plain serial loop is used.
 */
template <typename DType>
inline void ParallelAdd(DType* dst, const DType* src, index_t size) {
  // Threshold is read from the environment once (static) and reused.
  static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= add_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  } else {
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  }
}
/*!
* \brief If numpy compatibility is turned off (default), the shapes passed in
* by users follow the legacy shape definition:
* 1. 0 ndim means the shape is completely unknown.
* 2. 0 dim size means the dim size is unknown.
* We need to convert those shapes to use the numpy shape definition:
* 1. 0 ndim means it's a scalar tensor.
* 2. -1 ndim means the shape is unknown.
* 3. 0 dim size means no elements in that dimension.
* 4. -1 dim size means the dimension's size is unknown.
* so that operator's infer shape function can work in backend.
* \param shape to be converted.
* Note: It is possible that the shape to be converted is already
* numpy compatible. For example, when a subgraph operator's infer
* shape function is called from the infer shape pass of the whole
* graph, its input/output shapes have been converted to numpy
* compatible shapes.
*/
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
  // In-place conversion of one legacy shape to numpy semantics; see the
  // comment block above for the two shape conventions.
  if (shape->ndim() == 0) {  // legacy shape ndim = 0 means unknown
    *shape = mxnet::TShape();  // unknown shape ndim = -1
  } else {
    for (int j = 0; j < shape->ndim(); ++j) {
      if ((*shape)[j] == 0) {  // legacy shape dim_size = 0 means unknown
        (*shape)[j] = -1;  // unknown dim size = -1
      }
    }
  }
}
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  // Apply the single-shape numpy conversion to every entry of the vector.
  const size_t num_shapes = shapes->size();
  for (size_t idx = 0; idx < num_shapes; ++idx) {
    ConvertToNumpyShape(&(shapes->at(idx)));
  }
}
/*!
 * \brief Convert a shape produced by the infer-shape functions/pass back to
 * the legacy shape definition: an unknown ndim becomes ndim = 0, and any
 * unknown dimension size becomes 0.
 */
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
  if (!mxnet::ndim_is_known(*shape)) {
    *shape = mxnet::TShape(0, -1);
  } else {
    for (int j = 0; j < shape->ndim(); ++j) {
      if (!mxnet::dim_size_is_known(*shape, j)) {
        (*shape)[j] = 0;
      }
    }
  }
}
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
  // Apply the single-shape legacy conversion to every entry of the vector.
  const size_t num_shapes = shapes->size();
  for (size_t idx = 0; idx < num_shapes; ++idx) {
    ConvertToLegacyShape(&(shapes->at(idx)));
  }
}
void ExecuteMonInputCallback(
const nnvm::IndexedGraph& idx,
const std::vector<NDArray*>& state_arrays,
size_t nid,
const std::function<void(const char*, const char*, void*)>& monitor_callback);
void ExecuteMonOutputCallback(
const nnvm::IndexedGraph& idx,
const std::vector<NDArray*>& state_arrays,
size_t nid,
const std::function<void(const char*, const char*, void*)>& monitor_callback);
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
  // Map each negative axis value into [0, ndim) and validate the result.
  mxnet::TShape axes = src;
  const int ndim = src.ndim();
  for (int i = 0; i < ndim; ++i) {
    auto axis = axes[i];
    if (axis < 0) {
      axis += ndim;
      axes[i] = axis;
    }
    CHECK(axis >= 0 && axis < ndim)
        << "axes[" << i << "]=" << axis << " exceeds the range [" << 0 << ", " << ndim << ")";
  }
  return axes;
}
inline bool is_float(const int dtype) {
  // True for the three IEEE floating-point dtypes (fp32, fp64, fp16).
  switch (dtype) {
    case mshadow::kFloat32:
    case mshadow::kFloat64:
    case mshadow::kFloat16:
      return true;
    default:
      return false;
  }
}
inline bool is_bfloat(const int dtype) {
  // True only for the bfloat16 dtype.
  switch (dtype) {
    case mshadow::kBfloat16:
      return true;
    default:
      return false;
  }
}
inline bool is_int(const int dtype) {
  // True for any fixed-width integer dtype, signed or unsigned.
  switch (dtype) {
    case mshadow::kUint8:
    case mshadow::kInt8:
    case mshadow::kUint16:
    case mshadow::kInt16:
    case mshadow::kUint32:
    case mshadow::kInt32:
    case mshadow::kUint64:
    case mshadow::kInt64:
      return true;
    default:
      return false;
  }
}
inline bool is_signed_int(const int dtype) {
  // True for the signed fixed-width integer dtypes.
  switch (dtype) {
    case mshadow::kInt8:
    case mshadow::kInt16:
    case mshadow::kInt32:
    case mshadow::kInt64:
      return true;
    default:
      return false;
  }
}
inline bool is_unsigned_int(const int dtype) {
  // True for the unsigned fixed-width integer dtypes.
  switch (dtype) {
    case mshadow::kUint8:
    case mshadow::kUint16:
    case mshadow::kUint32:
    case mshadow::kUint64:
      return true;
    default:
      return false;
  }
}
// Width in bits (sizeof * CHAR_BIT) of the storage type behind an mshadow
// type flag.  LOG(FATAL)s on an unknown flag; the return -1 after it only
// satisfies the compiler.  Note kFloat16/kBfloat16 are not handled here.
static int bits_of(const int type_flag) {
  switch (type_flag) {
    case mshadow::kFloat32:
      return sizeof(float) * CHAR_BIT;
    case mshadow::kFloat64:
      return sizeof(double) * CHAR_BIT;
    case mshadow::kUint8:
      return sizeof(uint8_t) * CHAR_BIT;
    case mshadow::kInt32:
      return sizeof(int32_t) * CHAR_BIT;
    case mshadow::kInt8:
      return sizeof(int8_t) * CHAR_BIT;
    case mshadow::kInt64:
      return sizeof(int64_t) * CHAR_BIT;
    case mshadow::kBool:
      return sizeof(bool) * CHAR_BIT;
    case mshadow::kInt16:
      return sizeof(int16_t) * CHAR_BIT;
    case mshadow::kUint16:
      return sizeof(uint16_t) * CHAR_BIT;
    case mshadow::kUint32:
      return sizeof(uint32_t) * CHAR_BIT;
    case mshadow::kUint64:
      return sizeof(uint64_t) * CHAR_BIT;
    default: {
      LOG(FATAL) << "Unknown type_flag=" << type_flag;
      return -1;
    }
  }
}
// Result dtype when combining two operand dtypes.  NOTE(review): the rules
// appear to follow NumPy-style promotion (float beats int, wider beats
// narrower, bool is neutral, mixed signed/unsigned widens to the next
// signed type) -- confirm against the np semantics this backend targets.
inline int type_promotion(const int type1, const int type2) {
  // Identical types promote to themselves.
  if (type1 == type2)
    return type1;
  // Both floating: take the wider of the two.
  if (is_float(type1) && is_float(type2)) {
    if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) {
      return mshadow::kFloat64;
    }
    if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) {
      return mshadow::kFloat32;
    }
    return mshadow::kFloat16;
  } else if (is_float(type1) || is_float(type2)) {
    // Float mixed with non-float: the float side wins.
    return is_float(type1) ? type1 : type2;
  }
  // Both signed integers: take the wider of the two.
  if (is_signed_int(type1) && is_signed_int(type2)) {
    if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) {
      return mshadow::kInt64;
    }
    if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) {
      return mshadow::kInt32;
    }
    if (type1 == mshadow::kInt16 || type2 == mshadow::kInt16) {
      return mshadow::kInt16;
    }
    return mshadow::kInt8;
  } else if (is_unsigned_int(type1) && is_unsigned_int(type2)) {
    // Both unsigned integers: take the wider of the two.
    if (type1 == mshadow::kUint64 || type2 == mshadow::kUint64) {
      return mshadow::kUint64;
    }
    if (type1 == mshadow::kUint32 || type2 == mshadow::kUint32) {
      return mshadow::kUint32;
    }
    if (type1 == mshadow::kUint16 || type2 == mshadow::kUint16) {
      return mshadow::kUint16;
    }
    return mshadow::kUint8;
  } else if (type1 == mshadow::kBool) {
    // bool is neutral: the other operand's dtype wins.
    return type2;
  } else if (type2 == mshadow::kBool) {
    return type1;
  } else if (is_unsigned_int(type1) || is_unsigned_int(type2)) {
    // Mixed signed/unsigned: widen to a signed type that can hold both.
    if (bits_of(type1) < bits_of(type2)) {
      if (type1 == mshadow::kInt8 && type2 == mshadow::kUint16) {
        return mshadow::kInt32;
      } else if (type1 == mshadow::kInt8 && type2 == mshadow::kUint32) {
        return mshadow::kInt64;
      } else if (type1 == mshadow::kInt16 && type2 == mshadow::kUint32) {
        return mshadow::kInt64;
      } else if (type2 == mshadow::kUint64) {
        // No signed type holds uint64; LOG(FATAL) presumably aborts, and
        // control then falls to the final LOG(FATAL)/return below.
        LOG(FATAL) << "Unsupported type promotions between " << mshadow::dtype_string(type1)
                   << " and " << mshadow::dtype_string(type2);
      } else {
        return type2;
      }
    } else if (bits_of(type2) < bits_of(type1)) {
      // Mirror image of the branch above with the operands swapped.
      if (type2 == mshadow::kInt8 && type1 == mshadow::kUint16) {
        return mshadow::kInt32;
      } else if (type2 == mshadow::kInt8 && type1 == mshadow::kUint32) {
        return mshadow::kInt64;
      } else if (type2 == mshadow::kInt16 && type1 == mshadow::kUint32) {
        return mshadow::kInt64;
      } else if (type1 == mshadow::kUint64) {
        LOG(FATAL) << "Unsupported type promotions between " << mshadow::dtype_string(type1)
                   << " and " << mshadow::dtype_string(type2);
      } else {
        return type1;
      }
    } else {
      // Same width, one signed and one unsigned: promote to the next wider
      // signed type.
      if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) {
        return mshadow::kInt16;
      }
      if (type1 == mshadow::kUint16 || type2 == mshadow::kUint16) {
        return mshadow::kInt32;
      }
      if (type1 == mshadow::kUint32 || type2 == mshadow::kUint32) {
        return mshadow::kInt64;
      }
    }
  }
  LOG(FATAL) << "Unsupported type promotions between " << mshadow::dtype_string(type1) << " and "
             << mshadow::dtype_string(type2);
  return -1;
}
inline const std::string NodeAttrsGetProfilerScope(const nnvm::NodeAttrs& attrs) {
  // Return the profiler scope recorded in the node's attribute dictionary,
  // falling back to the storage default when it was never assigned.
  const auto& dict = attrs.dict;
  const auto it = dict.find("__profiler_scope__");
  if (it != dict.end()) {
    return it->second;
  }
  return MXNET_STORAGE_DEFAULT_PROFILER_SCOPE_CSTR;
}
inline int GetDefaultDtype() {
  // float64 when NumPy-style default dtype is active, float32 otherwise.
  if (Imperative::Get()->is_np_default_dtype()) {
    return mshadow::kFloat64;
  }
  return mshadow::kFloat32;
}
inline int GetDefaultDtype(int dtype) {
  // An explicit dtype passes straight through; -1 means "unspecified" and
  // maps to the global default (float64 under NumPy semantics, else float32).
  if (dtype != -1) {
    return dtype;
  }
  if (Imperative::Get()->is_np_default_dtype()) {
    return mshadow::kFloat64;
  }
  return mshadow::kFloat32;
}
struct MShadowTypeInfo {
  std::string name;  // human-readable mshadow type name
  int size;          // element size in bytes
  int acc_size;      // accumulator element size in bytes

  // The string parameter is taken by value WITHOUT const so that the
  // std::move below performs a real move; the original declared it
  // `const std::string`, which makes std::move silently degrade to a copy.
  MShadowTypeInfo(std::string name, const int size, const int acc_size)
      : name(std::move(name)), size(size), acc_size(acc_size) {}

  // Convenience constructor: accumulator size defaults to the element size.
  MShadowTypeInfo(std::string name, const int size)
      : MShadowTypeInfo(std::move(name), size, size) {}
};
MShadowTypeInfo mshadow_type_info(const int type_flag);
// Portable aligned allocation: _aligned_malloc on MSVC, posix_memalign
// elsewhere.  Returns true on success, false on failure.
// NOTE(review): posix_memalign requires `alignment` to be a power of two and
// a multiple of sizeof(void*) -- callers must guarantee this.
inline bool AlignedMemAlloc(void** ptr, size_t size, size_t alignment) {
#if _MSC_VER
  *ptr = _aligned_malloc(size, alignment);
  if (*ptr == nullptr)
    return false;
#else
  int res = posix_memalign(ptr, alignment, size);
  if (res != 0)
    return false;
#endif
  return true;
}
// Release memory obtained from AlignedMemAlloc, using the matching
// deallocator for the platform (_aligned_free on MSVC, free elsewhere).
inline void AlignedMemFree(void* ptr) {
#if _MSC_VER
  _aligned_free(ptr);
#else
  free(ptr);
#endif
}
inline index_t div_round(const index_t a, const index_t b) {
  // Ceiling division for non-negative a and positive b.
  const index_t rounded_up = a + b - 1;
  return rounded_up / b;
}
inline bool IsPower2(size_t N) {
  // A power of two has exactly one set bit; N & (N - 1) clears the lowest
  // set bit, so the result is zero exactly for powers of two.  Zero is
  // explicitly excluded.
  if (N == 0) {
    return false;
  }
  return (N & (N - 1)) == 0;
}
inline size_t RoundToPower2(size_t N) {
  // Smallest power of two >= N (returns 1 for N <= 1).
  size_t power = 1;
  for (size_t v = N; v >= 2; v /= 2) {
    power *= 2;
  }
  if (power < N) {
    power *= 2;
  }
  return power;
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
proj_EM_step.c | /*
NAME:
proj_EM_step
PURPOSE:
one proj_EM step
CALLING SEQUENCE:
proj_EM_step(struct datapoint * data, int N, struct gaussian * gaussians,
int K,bool * fixamp, bool * fixmean, bool * fixcovar,
double * avgloglikedata, bool likeonly, double w, bool noproj,
bool diagerrs, bool noweight)
INPUT:
data - the data
N - number of data points
gaussians - model gaussians
K - number of model gaussians
fixamp - fix the amplitude?
fixmean - fix the mean?
fixcovar - fix the covar?
likeonly - only compute likelihood?
w - regularization parameter
noproj - don't perform any projections
diagerrs - the data->SS errors-squared are diagonal
noweight - don't use data-weights
OUTPUT:
avgloglikedata - average loglikelihood of the data
REVISION HISTORY:
2008-09-21 - Written Bovy
2010-03-01 Added noproj option - Bovy
2010-04-01 Added noweight option - Bovy
*/
#ifdef _OPENMP
#include <omp.h>
#endif
#include <math.h>
//#include <time.h>
//#include <sys/time.h>
#include <float.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_blas.h>
#include <proj_gauss_mixtures.h>
#define CHUNKSIZE 1
void proj_EM_step(struct datapoint * data, int N,
		  struct gaussian * gaussians, int K,bool * fixamp,
		  bool * fixmean, bool * fixcovar, double * avgloglikedata,
		  bool likeonly, double w, bool noproj, bool diagerrs,
		  bool noweight){
  /* One EM step for the projected Gaussian mixture; see the header comment
     above for parameter semantics.
     NOTE(review): relies on file-scope workspace shared with the rest of
     proj_gauss_mixtures (nthreads, newgaussians, startnewgaussians, qij,
     bs, tid, halflogtwopi, I, and the GSL temporaries p, wminusRm,
     TinvwminusRm, Tij, Tij_inv, VRT, VRTTinv, Rtrans) -- presumably
     declared in proj_gauss_mixtures.h; confirm. */
  *avgloglikedata = 0.0;
  //struct timeval start,time1, time2, time3, time4, time5,end;
  struct datapoint * thisdata;
  struct gaussian * thisgaussian;
  struct gaussian * thisnewgaussian;
  int signum,di;
  double exponent;
  double currqij;
  struct modelbs * thisbs;
  int d = (gaussians->VV)->size1;//dim of mm
  //gettimeofday(&start,NULL);
  //Initialize new parameters
  /* Zero the per-thread accumulators (K gaussians per thread). */
  int kk;
  for (kk=0; kk != K*nthreads; ++kk){
    newgaussians->alpha = 0.0;
    gsl_vector_set_zero(newgaussians->mm);
    gsl_matrix_set_zero(newgaussians->VV);
    ++newgaussians;
  }
  newgaussians= startnewgaussians;
  //gettimeofday(&time1,NULL);
  //check whether for some Gaussians none of the parameters get updated
  double sumfixedamps= 0;
  bool * allfixed = (bool *) calloc(K, sizeof (bool) );
  double ampnorm;
  for (kk=0; kk != K; ++kk){
    if (*fixamp == true){
      sumfixedamps += gaussians->alpha;
    }
    ++gaussians;
    if (*fixamp == true && *fixmean == true && *fixcovar == true)
      *allfixed= true;
    ++allfixed;
    ++fixamp;
    ++fixmean;
    ++fixcovar;
  }
  /* Rewind the walked pointers to their original positions. */
  gaussians -= K;
  allfixed -= K;
  fixamp -= K;
  fixmean -= K;
  fixcovar -= K;
  //gettimeofday(&time2,NULL);
  //now loop over data and gaussians to update the model parameters
  /* E-step (and part of the M-step accumulation), parallelized over the
     data points. */
  int ii, jj, ll;
  double sumSV;
  int chunk;
  chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) \
  private(tid,di,signum,exponent,ii,jj,ll,kk,Tij,Tij_inv,wminusRm,p,VRTTinv,sumSV,VRT,TinvwminusRm,Rtrans,thisgaussian,thisdata,thisbs,thisnewgaussian,currqij) \
  shared(newgaussians,gaussians,bs,allfixed,K,d,data,avgloglikedata)
  for (ii = 0 ; ii < N; ++ii){
    thisdata= data+ii;
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    /* Per-datapoint workspace: dimensions can differ between points. */
    di = (thisdata->SS)->size1;
    //printf("Datapoint has dimension %i\n",di);
    p = gsl_permutation_alloc (di);
    wminusRm = gsl_vector_alloc (di);
    TinvwminusRm = gsl_vector_alloc (di);
    Tij = gsl_matrix_alloc(di,di);
    Tij_inv = gsl_matrix_alloc(di,di);
    if ( ! noproj ) VRT = gsl_matrix_alloc(d,di);
    VRTTinv = gsl_matrix_alloc(d,di);
    if ( ! noproj ) Rtrans = gsl_matrix_alloc(d,di);
    for (jj = 0; jj != K; ++jj){
      //printf("%i,%i\n",(thisdata->ww)->size,wminusRm->size);
      gsl_vector_memcpy(wminusRm,thisdata->ww);
      //fprintf(stdout,"Where is the seg fault?\n");
      thisgaussian= gaussians+jj;
      //prepare...
      /* Seed Tij with the measurement covariance S (diagonal or full). */
      if ( ! noproj ) {
	if ( diagerrs ) {
	  gsl_matrix_set_zero(Tij);
	  for (ll = 0; ll != di; ++ll)
	    gsl_matrix_set(Tij,ll,ll,gsl_matrix_get(thisdata->SS,ll,0));}
	else
	  gsl_matrix_memcpy(Tij,thisdata->SS);
      }
      //Calculate Tij
      /* Tij = R V R^T + S with projection, or V + S without. */
      if ( ! noproj ) {
	gsl_matrix_transpose_memcpy(Rtrans,thisdata->RR);
	gsl_blas_dsymm(CblasLeft,CblasUpper,1.0,thisgaussian->VV,Rtrans,0.0,VRT);//Only the upper right part of VV is calculated --> use only that part
	gsl_blas_dgemm(CblasNoTrans,CblasNoTrans,1.0,thisdata->RR,VRT,1.0,Tij);}//This is Tij
      else {
	if ( diagerrs ) {
	  for (kk = 0; kk != d; ++kk){
	    gsl_matrix_set(Tij,kk,kk,gsl_matrix_get(thisdata->SS,kk,0)+gsl_matrix_get(thisgaussian->VV,kk,kk));
	    for (ll = kk+1; ll != d; ++ll){
	      sumSV= gsl_matrix_get(thisgaussian->VV,kk,ll);
	      gsl_matrix_set(Tij,kk,ll,sumSV);
	      gsl_matrix_set(Tij,ll,kk,sumSV);}}}
	else {
	  for (kk = 0; kk != d; ++kk){
	    gsl_matrix_set(Tij,kk,kk,gsl_matrix_get(thisdata->SS,kk,kk)+gsl_matrix_get(thisgaussian->VV,kk,kk));
	    for (ll = kk+1; ll != d; ++ll){
	      sumSV= gsl_matrix_get(thisdata->SS,kk,ll)+gsl_matrix_get(thisgaussian->VV,kk,ll);
	      gsl_matrix_set(Tij,kk,ll,sumSV);
	      gsl_matrix_set(Tij,ll,kk,sumSV);}}}}
      //gsl_matrix_add(Tij,thisgaussian->VV);}
      //Calculate LU decomp of Tij and Tij inverse
      gsl_linalg_LU_decomp(Tij,p,&signum);
      gsl_linalg_LU_invert(Tij,p,Tij_inv);
      //Calculate Tijinv*(w-Rm)
      if ( ! noproj ) gsl_blas_dgemv(CblasNoTrans,-1.0,thisdata->RR,thisgaussian->mm,1.0,wminusRm);
      else gsl_vector_sub(wminusRm,thisgaussian->mm);
      //printf("wminusRm = %f\t%f\n",gsl_vector_get(wminusRm,0),gsl_vector_get(wminusRm,1));
      gsl_blas_dsymv(CblasUpper,1.0,Tij_inv,wminusRm,0.0,TinvwminusRm);
      //printf("TinvwminusRm = %f\t%f\n",gsl_vector_get(TinvwminusRm,0),gsl_vector_get(TinvwminusRm,1));
      gsl_blas_ddot(wminusRm,TinvwminusRm,&exponent);
      //printf("Exponent = %f\nDet = %f\n",exponent,gsl_linalg_LU_det(Tij,signum));
      gsl_matrix_set(qij,ii,jj,log(thisgaussian->alpha) - di * halflogtwopi - 0.5 * gsl_linalg_LU_lndet(Tij) -0.5 * exponent);//This is actually the log of qij
      //printf("Here we have = %f\n",gsl_matrix_get(qij,ii,jj));
      //Now calculate bij and Bij
      thisbs= bs+tid*K+jj;
      gsl_vector_memcpy(thisbs->bbij,thisgaussian->mm);
      //printf("%i,%i,%i\n",tid,ii,jj);
      if ( ! noproj ) gsl_blas_dgemv(CblasNoTrans,1.0,VRT,TinvwminusRm,1.0,thisbs->bbij);
      else gsl_blas_dsymv(CblasUpper,1.0,thisgaussian->VV,TinvwminusRm,1.0,thisbs->bbij);
      //printf("bij = %f\t%f\n",gsl_vector_get(bs->bbij,0),gsl_vector_get(bs->bbij,1));
      gsl_matrix_memcpy(thisbs->BBij,thisgaussian->VV);
      if ( ! noproj ) {
	gsl_blas_dgemm(CblasNoTrans,CblasNoTrans,1.0,VRT,Tij_inv,0.0,VRTTinv);
	gsl_blas_dgemm(CblasNoTrans,CblasTrans,-1.0,VRTTinv,VRT,1.0,thisbs->BBij);}
      else {
	gsl_blas_dsymm(CblasLeft,CblasUpper,1.0,thisgaussian->VV,Tij_inv,0.0,VRTTinv);
	gsl_blas_dsymm(CblasRight,CblasUpper,-1.0,thisgaussian->VV,VRTTinv,1.0,thisbs->BBij);}
      gsl_blas_dsyr(CblasUpper,1.0,thisbs->bbij,thisbs->BBij);//This is bijbijT + Bij, which is the relevant quantity
    }
    gsl_permutation_free (p);
    gsl_vector_free(wminusRm);
    gsl_vector_free(TinvwminusRm);
    gsl_matrix_free(Tij);
    gsl_matrix_free(Tij_inv);
    if ( ! noproj ) gsl_matrix_free(VRT);
    gsl_matrix_free(VRTTinv);
    if ( ! noproj ) gsl_matrix_free(Rtrans);
    //Again loop over the gaussians to update the model(can this be more efficient? in any case this is not so bad since generally K << N)
#pragma omp critical
    {
      //Normalize qij properly
      *avgloglikedata += normalize_row(qij,ii,true,noweight,thisdata->logweight);
    }
    //printf("qij = %f\t%f\n",gsl_matrix_get(qij,ii,0),gsl_matrix_get(qij,ii,1));
    //printf("avgloglgge = %f\n",*avgloglikedata);
    /* Accumulate this datapoint's responsibility-weighted contributions
       into this thread's private newgaussians slot. */
    for (jj = 0; jj != K; ++jj){
      currqij = exp(gsl_matrix_get(qij,ii,jj));
      //printf("Current qij = %f\n",currqij);
      thisbs= bs+tid*K+jj;
      thisnewgaussian= newgaussians+tid*K+jj;
      gsl_vector_scale(thisbs->bbij,currqij);
      gsl_vector_add(thisnewgaussian->mm,thisbs->bbij);
      gsl_matrix_scale(thisbs->BBij,currqij);
      gsl_matrix_add(thisnewgaussian->VV,thisbs->BBij);
      //printf("bij = %f\t%f\n",gsl_vector_get(bs->bbij,0),gsl_vector_get(bs->bbij,1));
      //printf("Bij = %f\t%f\t%f\n",gsl_matrix_get(bs->BBij,0,0),gsl_matrix_get(bs->BBij,1,1),gsl_matrix_get(bs->BBij,0,1));
    }
  }
  *avgloglikedata /= N;
  if (likeonly) {
    free(allfixed);
    return;
  }
  //gettimeofday(&time3,NULL);
  //gather newgaussians
  /* Reduce the per-thread accumulators into thread 0's slots. */
  if ( nthreads != 1 )
#pragma omp parallel for schedule(static,chunk) \
  private(ll,jj)
    for (jj = 0; jj < K; ++jj)
      for (ll = 1; ll != nthreads; ++ll) {
	gsl_vector_add((newgaussians+jj)->mm,(newgaussians+ll*K+jj)->mm);
	gsl_matrix_add((newgaussians+jj)->VV,(newgaussians+ll*K+jj)->VV);
      }
  //gettimeofday(&time4,NULL);
  //Now update the parameters
  //Thus, loop over gaussians again!
  /* M-step proper: amplitudes, means, and covariances (with optional
     regularization w), respecting the fix* flags. */
  double qj;
#pragma omp parallel for schedule(dynamic,chunk) \
  private(jj,qj)
  for (jj = 0; jj < K; ++jj){
    if (*(allfixed+jj)){
      continue;
    }
    else {
      qj = exp(logsum(qij,jj,false));
      (qj < DBL_MIN) ? qj = 0: 0;
      //printf("qj = %f\n",qj);
      if (*(fixamp+jj) != true) {
	(gaussians+jj)->alpha = qj;
	if (qj == 0) {//rethink this
	  *(fixamp+jj)=1;
	  *(fixmean+jj)=1;
	  *(fixcovar+jj)=1;
	  continue;
	}
      }
      gsl_vector_scale((newgaussians+jj)->mm,1.0/qj);
      if (*(fixmean+jj) != true){
	gsl_vector_memcpy((gaussians+jj)->mm,(newgaussians+jj)->mm);
      }
      if (*(fixcovar+jj) != true){
	//	if (*(fixmean+jj) != true)
	//	  gsl_blas_dsyr(CblasUpper,-qj,(gaussians+jj)->mm,(newgaussians+jj)->VV);
	//	else {
	gsl_blas_dsyr(CblasUpper,qj,(gaussians+jj)->mm,(newgaussians+jj)->VV);
	gsl_blas_dsyr2(CblasUpper,-qj,(gaussians+jj)->mm,(newgaussians+jj)->mm,(newgaussians+jj)->VV);
	//	}
	if (w > 0.){
	  gsl_matrix_add((newgaussians+jj)->VV,I);
	  gsl_matrix_scale((newgaussians+jj)->VV,1.0/(qj+1.0));
	}
	else gsl_matrix_scale((newgaussians+jj)->VV,1.0/qj);
	gsl_matrix_memcpy((gaussians+jj)->VV,(newgaussians+jj)->VV);
      }
    }
  }
  //gettimeofday(&time5,NULL);
  //normalize the amplitudes
  if ( sumfixedamps == 0. && noweight ){
    for (kk=0; kk != K; ++kk){
      if ( noweight ) (gaussians++)->alpha /= (double) N;
    }
  }
  else {
    ampnorm= 0;
    for (kk=0; kk != K; ++kk){
      if (*(fixamp++) == false) ampnorm += gaussians->alpha;
      ++gaussians;
    }
    fixamp -= K;
    gaussians -= K;
    for (kk=0; kk != K; ++kk){
      if (*(fixamp++) == false){
	gaussians->alpha /= ampnorm;
	gaussians->alpha *= (1. - sumfixedamps);
      }
      ++gaussians;
    }
    fixamp -= K;
    gaussians -= K;
  }
  //gettimeofday(&end,NULL);
  //double diff, diff1, diff2, diff3, diff4, diff5,diff6;
  //diff= difftime (end.tv_sec,start.tv_sec)+difftime (end.tv_usec,start.tv_usec)/1000000;
  //diff1= (difftime(time1.tv_sec,start.tv_sec)+difftime(time1.tv_usec,start.tv_usec)/1000000)/diff;
  //diff2= (difftime(time2.tv_sec,time1.tv_sec)+difftime(time2.tv_usec,time1.tv_usec)/1000000)/diff;
  //diff3= (difftime(time3.tv_sec,time2.tv_sec)+difftime(time3.tv_usec,time2.tv_usec)/1000000)/diff;
  //diff4= (difftime(time4.tv_sec,time3.tv_sec)+difftime(time4.tv_usec,time3.tv_usec)/1000000)/diff;
  //diff5= (difftime(time5.tv_sec,time4.tv_sec)+difftime(time5.tv_usec,time4.tv_usec)/1000000)/diff;
  //diff6= (difftime(end.tv_sec,time5.tv_sec)+difftime(end.tv_usec,time5.tv_usec)/1000000)/diff;
  //printf("%f,%f,%f,%f,%f,%f,%f\n",diff,diff1,diff2,diff3,diff4,diff5,diff6);
  free(allfixed);
  return;
}
|
openmp-ex28.c | #include <stdio.h>
int main(void)
{
    int tickets_out = 0;
#pragma omp parallel
    {
        int my_ticket;
        /* The critical region admits one thread at a time, so every thread
         * receives a unique ticket number and the printf calls cannot
         * interleave.  (Streams are otherwise unprotected between threads;
         * in general only one thread should own a stream at a time to
         * prevent garbled messages.) */
#pragma omp critical
        {
            my_ticket = tickets_out;
            tickets_out = tickets_out + 1;
            printf("My ticket is %d\n", my_ticket);
        }
    }
    return 0;
}
|
flush.c | //////////////////////////////////////////////////////////////
//
// flush.c
//
// Copyright (c) 2017, Hassan Salehe Matar
// All rights reserved.
//
// This file is part of Clanomp. For details, see
// https://github.com/hassansalehe/Clanomp. Please also
// see the LICENSE file for additional BSD notice
//
// Redistribution and use in source and binary forms, with
// or without modification, are permitted provided that
// the following conditions are met:
//
// * Redistributions of source code must retain the above
// copyright notice, this list of conditions and the
// following disclaimer.
//
// * Redistributions in binary form must reproduce the
// above copyright notice, this list of conditions and
// the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// * Neither the name of the copyright holder nor the names
// of its contributors may be used to endorse or promote
// products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
// CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////
// From the OpenMP specification:
//
// * Makes a thread’s temporary view of memory consistent with
// memory and enforces an order on the memory operations of
// the variables explicitly specified or implied
//
// * The binding thread set for a flush region is the encountering
// thread. Execution of a flush region affects the memory and
// the temporary view of memory of only the thread that executes
// the region. It does not affect the temporary view of other
// threads. Other threads must themselves execute a flush operation
// in order to be guaranteed to observe the effects of the
// encountering thread’s flush operation
//
// * A barrier also implies a flush
//
// References:
// 1. http://www.openmp.org/wp-content/uploads/openmp-examples-4.5.0.pdf
// 2. http://www.openmp.org/wp-content/uploads/openmp-4.5.pdf
#include <stdio.h>
#include <omp.h>
int main() {
  int count = 0;
#pragma omp parallel shared(count)
  {
    /* Flush before and after the increment so this thread's temporary view
     * of count is reconciled with memory.  NOTE: flush provides no mutual
     * exclusion, so the unprotected increment itself still races between
     * threads -- which is what this example illustrates. */
#pragma omp flush(count)
    ++count;
#pragma omp flush(count)
  }
  printf("Value of count: %d, construct: <flush>\n", count);
  return 0;
}
|
GB_unaryop__minv_uint8_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_fp32
// op(A') function: GB_tran__minv_uint8_fp32
// C type: uint8_t
// A type: float
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
float
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = minv (cast of Ax [p]) for all p, using the GB_CAST_OP macro
// defined above (float -> uint8_t cast, then GB_IMINV_UNSIGNED).
GrB_Info GB_unop__minv_uint8_fp32
(
    uint8_t *restrict Cx,       // output array (restrict: no aliasing with Ax)
    const float *restrict Ax,   // input array
    int64_t anz,                // number of entries to apply the op to
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator compiled out (see GB_DISABLE above); caller falls back to the
    // generic case
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (cast (A')): the transpose loop itself lives in the shared
// template GB_unaryop_transpose.c, which expands the GB_* macros above.
GrB_Info GB_tran__minv_uint8_fp32
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, transposed into C
    int64_t **Rowcounts,                // per-slice row counts workspace
    GBI_single_iterator Iter,           // iterator over A's vectors
    const int64_t *restrict A_slice,    // partition of A for the slices
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    // operator compiled out (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
poissonSORRB.c | #include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#define TIME_RES 1000
double** w;
double** u;
double initial_time;
double clearcache [30000000];
int iter;
double p;
double tol;
double diff;
//número de pontos da grelha
int N;
void clearCache (){
    /* Touch every element of the large global buffer so previously cached
     * data is evicted before a timed run. */
    int idx;
    for(idx = 0; idx < 30000000; idx++){
        clearcache[idx] = idx;
    }
}
void start(){
    /* Record the current wall-clock time, scaled to TIME_RES units, as the
     * timing origin for stop(). */
    initial_time = omp_get_wtime() * TIME_RES;
}
double stop(){
    /* Elapsed wall-clock time since start(), in TIME_RES units. */
    double final = omp_get_wtime() * TIME_RES;
    return final - initial_time;
}
/* Allocate the global N x N grids u (previous iterate) and w (current
 * iterate) and set boundary conditions: 100 on the bottom and side borders,
 * 0 on the top border, 50 in the interior.  Exits on allocation failure
 * (the original dereferenced unchecked malloc results). */
void initialize_matrices(){
    int i, j;
    u = (double**) malloc(sizeof(double*) * N);
    w = (double**) malloc(sizeof(double*) * N);
    if(u == NULL || w == NULL){
        fprintf(stderr, "initialize_matrices: out of memory\n");
        exit(1);
    }
    for(i = 0; i < N; i++){
        w[i] = (double*) malloc(sizeof(double) * N);
        u[i] = (double*) malloc(sizeof(double) * N);
        if(w[i] == NULL || u[i] == NULL){
            fprintf(stderr, "initialize_matrices: out of memory\n");
            exit(1);
        }
        for(j = 0; j < N; j++){
            u[i][j] = 0; /* u only holds the previous iterate; start at zero */
            if(i == N-1 || (j == 0 && i != 0) || (j == N-1 && i != 0)){
                w[i][j] = 100; /* bottom and side boundaries */
            }else if(i == 0){
                w[i][j] = 0;   /* top boundary */
            }else{
                w[i][j] = 50;  /* interior initial guess */
            }
        }
    }
}
// Dar print da matriz w
void print_matrix(){
    /* Print the current grid w, one row per line. */
    int row, col;
    for(row = 0; row < N; row++){
        for(col = 0; col < N; col++){
            printf("%f ", w[row][col]);
        }
        printf("\n");
    }
}
void free_matrices(){
free(u);
free(w);
}
//funcao que calcula a diferenca entre 2 vetores e guarda o resultado noutro vetor
/* Element-wise difference a - b into a freshly allocated N x N matrix;
 * the caller owns the result and its N rows.  The original swapped
 * sizeof(double) and sizeof(double*) in the two mallocs, which only worked
 * because both happen to be 8 bytes on common platforms. */
double** diferenca(double** a, double** b){
    double** result = (double**) malloc(N * sizeof(double*));
    int i, j;
    for(i = 0; i < N; i++){
        result[i] = (double*) malloc(N * sizeof(double));
        for(j = 0; j < N; j++){
            result[i][j] = a[i][j] - b[i][j];
        }
    }
    return result;
}
//funcao que retorna um vetor bi-dimensional com o valor absoluto de cada elemento
double** absol(double** vetor){
    /* Replace every element of the N x N matrix with its absolute value,
     * in place, and return the same matrix so calls can be chained. */
    int r, c;
    for(r = 0; r < N; r++){
        for(c = 0; c < N; c++){
            vetor[r][c] = fabs(vetor[r][c]);
        }
    }
    return vetor;
}
//funcao que retorna o maior elemento de um vetor
double maximum(double** vetor){
    /* Largest element of the N x N matrix.  The accumulator starts at 0,
     * so this assumes non-negative entries (callers pass absolute values). */
    double max = 0;
    int r, c;
    for(r = 0; r < N; r++){
        for(c = 0; c < N; c++){
            if(vetor[r][c] > max){
                max = vetor[r][c];
            }
        }
    }
    return max;
}
// Copy matrix b into matrix a, element by element (both N x N).
void iguala(double **a, double**b){
    int r, c;
    for (r = 0; r < N; r++) {
        for (c = 0; c < N; c++) {
            a[r][c] = b[r][c];
        }
    }
}
// Red-Black SOR: sweep the grid until the maximum change between two
// successive iterates drops to tol. Each sweep updates one colour class;
// an updated point reads only neighbours of the opposite colour, so no two
// threads ever touch the same element within a sweep and the original
// "omp atomic write" pragmas were unnecessary (removed).
// Fix: the original computed the convergence metric via
// maximum(absol(diferenca(w,u))), allocating an N x N matrix every
// iteration and never freeing it — an O(N^2) leak per iteration. The
// metric is now computed in place with no allocation.
void parallel(int threads){
    int i, j;
    while (diff > tol){
        iguala(u, w);  /* snapshot the previous iterate */
        /* Red sweep. */
        #pragma omp parallel for private(i,j) schedule(static) num_threads(threads)
        for (i = 1; i < N-1; i++){
            for (j = 1 + (i % 2); j < N-1; j += 2){
                w[i][j] = (1-p) * w[i][j] + p * (w[i-1][j] + w[i][j-1] + w[i][j+1] + w[i+1][j])/4;
            }
        }
        /* Black sweep. */
        #pragma omp parallel for private(i,j) schedule(static) num_threads(threads)
        for (i = 1; i < N-1; i++){
            for (j = 1 + ((i+1) % 2); j < N-1; j += 2){
                w[i][j] = (1-p) * w[i][j] + p * (w[i-1][j] + w[i][j-1] + w[i][j+1] + w[i+1][j])/4;
            }
        }
        iter++;
        /* Convergence test: max |w - u| over the whole grid. */
        {
            double max_change = 0.0;
            for (i = 0; i < N; i++){
                for (j = 0; j < N; j++){
                    double d = fabs(w[i][j] - u[i][j]);
                    if (d > max_change)
                        max_change = d;
                }
            }
            diff = max_change;
        }
    }
}
// Solve the Poisson problem with Red-Black SOR and append a CSV line
// (N, threads, tol, iterations, elapsed ms) to results/resultados.csv.
// Fixes: validate arguments (N=1 made sin(M_PI/(N-1)) divide by zero),
// check fopen() before use (was dereferenced unchecked), correct the
// usage string ("./ poissonSORRB" -> "./poissonSORRB"), drop unused locals.
int main(int argc, char* argv[]){
    if (argc != 3){
        printf("Usage : ./poissonSORRB <nr pontos da grelha> <numero threads>\n");
        return 0;
    }
    N = atoi(argv[1]);
    int threads = atoi(argv[2]);
    if (N < 2 || threads < 1){
        fprintf(stderr, "Invalid arguments: grid size must be >= 2 and threads >= 1\n");
        return 1;
    }
    clearCache();
    // Prepare the matrices for the algorithm.
    initialize_matrices();
    // Over-relaxation parameter.
    p = 2/(1 + sin(M_PI/(N-1)));
    tol = 1/(double)(N*N);
    diff = (tol + 1);  /* force at least one iteration */
    iter = 0;
    FILE* fp = fopen("results/resultados.csv","a");
    if (fp == NULL){
        fprintf(stderr, "Could not open results/resultados.csv for appending\n");
        free_matrices();
        return 1;
    }
    start();
    parallel(threads);
    double tempo = stop();
    fprintf(fp,"%d,%d,%2f,%d,%2f\n", N, threads, tol, iter, tempo);
    fclose(fp);
    free_matrices();
    return 0;
}
|
feature.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF EEEEE AAA TTTTT U U RRRR EEEEE %
% F E A A T U U R R E %
% FFF EEE AAAAA T U U RRRR EEE %
% F E A A T U U R R E %
% F EEEEE A A T UUU R R EEEEE %
% %
% %
% MagickCore Image Feature Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/property.h"
#include "magick/animate.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/feature.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l F e a t u r e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelFeatures() returns features for each channel in the image in
% each of four directions (horizontal, vertical, left and right diagonals)
% for the specified distance. The features include the angular second
% moment, contrast, correlation, sum of squares: variance, inverse difference
% moment, sum average, sum variance, sum entropy, entropy, difference variance,
% difference entropy, information measures of correlation 1, information
% measures of correlation 2, and maximum correlation coefficient. You can
% access the red channel contrast, for example, like this:
%
% channel_features=GetImageChannelFeatures(image,1,exception);
% contrast=channel_features[RedChannel].contrast[0];
%
% Use MagickRelinquishMemory() to free the features buffer.
%
% The format of the GetImageChannelFeatures method is:
%
% ChannelFeatures *GetImageChannelFeatures(const Image *image,
% const size_t distance,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o distance: the distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return the absolute value of x. (As in the original, the most-negative
  ssize_t value cannot be negated — NOTE(review): confirm callers never
  pass it.)
*/
static inline ssize_t MagickAbsoluteValue(const ssize_t x)
{
  return(x < 0 ? -x : x);
}
MagickExport ChannelFeatures *GetImageChannelFeatures(const Image *image,
const size_t distance,ExceptionInfo *exception)
{
typedef struct _ChannelStatistics
{
DoublePixelPacket
direction[4]; /* horizontal, vertical, left and right diagonals */
} ChannelStatistics;
CacheView
*image_view;
ChannelFeatures
*channel_features;
ChannelStatistics
**cooccurrence,
correlation,
*density_x,
*density_xy,
*density_y,
entropy_x,
entropy_xy,
entropy_xy1,
entropy_xy2,
entropy_y,
mean,
**Q,
*sum,
sum_squares,
variance;
LongPixelPacket
gray,
*grays;
MagickBooleanType
status;
register ssize_t
i;
size_t
length;
ssize_t
y,
z;
unsigned int
number_grays;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
return((ChannelFeatures *) NULL);
length=CompositeChannels+1UL;
channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
sizeof(*channel_features));
if (channel_features == (ChannelFeatures *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(channel_features,0,length*
sizeof(*channel_features));
/*
Form grays.
*/
grays=(LongPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays));
if (grays == (LongPixelPacket *) NULL)
{
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
for (i=0; i <= (ssize_t) MaxMap; i++)
{
grays[i].red=(~0U);
grays[i].green=(~0U);
grays[i].blue=(~0U);
grays[i].opacity=(~0U);
grays[i].index=(~0U);
}
status=MagickTrue;
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
grays[ScaleQuantumToMap(GetPixelRed(p))].red=
ScaleQuantumToMap(GetPixelRed(p));
grays[ScaleQuantumToMap(GetPixelGreen(p))].green=
ScaleQuantumToMap(GetPixelGreen(p));
grays[ScaleQuantumToMap(GetPixelBlue(p))].blue=
ScaleQuantumToMap(GetPixelBlue(p));
if (image->colorspace == CMYKColorspace)
grays[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index=
ScaleQuantumToMap(GetPixelIndex(indexes+x));
if (image->matte != MagickFalse)
grays[ScaleQuantumToMap(GetPixelOpacity(p))].opacity=
ScaleQuantumToMap(GetPixelOpacity(p));
p++;
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
return(channel_features);
}
(void) ResetMagickMemory(&gray,0,sizeof(gray));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if (grays[i].red != ~0U)
grays[(ssize_t) gray.red++].red=grays[i].red;
if (grays[i].green != ~0U)
grays[(ssize_t) gray.green++].green=grays[i].green;
if (grays[i].blue != ~0U)
grays[(ssize_t) gray.blue++].blue=grays[i].blue;
if (image->colorspace == CMYKColorspace)
if (grays[i].index != ~0U)
grays[(ssize_t) gray.index++].index=grays[i].index;
if (image->matte != MagickFalse)
if (grays[i].opacity != ~0U)
grays[(ssize_t) gray.opacity++].opacity=grays[i].opacity;
}
/*
Allocate spatial dependence matrix.
*/
number_grays=gray.red;
if (gray.green > number_grays)
number_grays=gray.green;
if (gray.blue > number_grays)
number_grays=gray.blue;
if (image->colorspace == CMYKColorspace)
if (gray.index > number_grays)
number_grays=gray.index;
if (image->matte != MagickFalse)
if (gray.opacity > number_grays)
number_grays=gray.opacity;
cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays,
sizeof(*cooccurrence));
density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_x));
density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_xy));
density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_y));
Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q));
sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum));
if ((cooccurrence == (ChannelStatistics **) NULL) ||
(density_x == (ChannelStatistics *) NULL) ||
(density_xy == (ChannelStatistics *) NULL) ||
(density_y == (ChannelStatistics *) NULL) ||
(Q == (ChannelStatistics **) NULL) ||
(sum == (ChannelStatistics *) NULL))
{
if (Q != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
}
if (sum != (ChannelStatistics *) NULL)
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
if (density_y != (ChannelStatistics *) NULL)
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
if (density_xy != (ChannelStatistics *) NULL)
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
if (density_x != (ChannelStatistics *) NULL)
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
if (cooccurrence != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(
cooccurrence);
}
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
(void) ResetMagickMemory(&correlation,0,sizeof(correlation));
(void) ResetMagickMemory(density_x,0,2*(number_grays+1)*sizeof(*density_x));
(void) ResetMagickMemory(density_xy,0,2*(number_grays+1)*sizeof(*density_xy));
(void) ResetMagickMemory(density_y,0,2*(number_grays+1)*sizeof(*density_y));
(void) ResetMagickMemory(&mean,0,sizeof(mean));
(void) ResetMagickMemory(sum,0,number_grays*sizeof(*sum));
(void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares));
(void) ResetMagickMemory(density_xy,0,2*number_grays*sizeof(*density_xy));
(void) ResetMagickMemory(&entropy_x,0,sizeof(entropy_x));
(void) ResetMagickMemory(&entropy_xy,0,sizeof(entropy_xy));
(void) ResetMagickMemory(&entropy_xy1,0,sizeof(entropy_xy1));
(void) ResetMagickMemory(&entropy_xy2,0,sizeof(entropy_xy2));
(void) ResetMagickMemory(&entropy_y,0,sizeof(entropy_y));
(void) ResetMagickMemory(&variance,0,sizeof(variance));
for (i=0; i < (ssize_t) number_grays; i++)
{
cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,
sizeof(**cooccurrence));
Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q));
if ((cooccurrence[i] == (ChannelStatistics *) NULL) ||
(Q[i] == (ChannelStatistics *) NULL))
break;
(void) ResetMagickMemory(cooccurrence[i],0,number_grays*
sizeof(**cooccurrence));
(void) ResetMagickMemory(Q[i],0,number_grays*sizeof(**Q));
}
if (i < (ssize_t) number_grays)
{
for (i--; i >= 0; i--)
{
if (Q[i] != (ChannelStatistics *) NULL)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
if (cooccurrence[i] != (ChannelStatistics *) NULL)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
}
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Initialize spatial dependence matrix.
*/
status=MagickTrue;
image_view=AcquireCacheView(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
ssize_t
i,
offset,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,y,image->columns+
2*distance,distance+1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
p+=distance;
indexes+=distance;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < 4; i++)
{
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
offset=(ssize_t) distance;
break;
}
case 1:
{
/*
Vertical adjacency.
*/
offset=(ssize_t) (image->columns+2*distance);
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)+distance);
break;
}
}
u=0;
v=0;
while (grays[u].red != ScaleQuantumToMap(GetPixelRed(p)))
u++;
while (grays[v].red != ScaleQuantumToMap(GetPixelRed(p+offset)))
v++;
cooccurrence[u][v].direction[i].red++;
cooccurrence[v][u].direction[i].red++;
u=0;
v=0;
while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(p)))
u++;
while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(p+offset)))
v++;
cooccurrence[u][v].direction[i].green++;
cooccurrence[v][u].direction[i].green++;
u=0;
v=0;
while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(p)))
u++;
while (grays[v].blue != ScaleQuantumToMap((p+offset)->blue))
v++;
cooccurrence[u][v].direction[i].blue++;
cooccurrence[v][u].direction[i].blue++;
if (image->colorspace == CMYKColorspace)
{
u=0;
v=0;
while (grays[u].index != ScaleQuantumToMap(GetPixelIndex(indexes+x)))
u++;
while (grays[v].index != ScaleQuantumToMap(GetPixelIndex(indexes+x+offset)))
v++;
cooccurrence[u][v].direction[i].index++;
cooccurrence[v][u].direction[i].index++;
}
if (image->matte != MagickFalse)
{
u=0;
v=0;
while (grays[u].opacity != ScaleQuantumToMap(GetPixelOpacity(p)))
u++;
while (grays[v].opacity != ScaleQuantumToMap((p+offset)->opacity))
v++;
cooccurrence[u][v].direction[i].opacity++;
cooccurrence[v][u].direction[i].opacity++;
}
}
p++;
}
}
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Normalize spatial dependence matrix.
*/
for (i=0; i < 4; i++)
{
double
normalize;
register ssize_t
y;
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
normalize=2.0*image->rows*(image->columns-distance);
break;
}
case 1:
{
/*
Vertical adjacency.
*/
normalize=2.0*(image->rows-distance)*image->columns;
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
}
normalize=1.0/(fabs((double) normalize) <= MagickEpsilon ? 1.0 : normalize);
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
cooccurrence[x][y].direction[i].red*=normalize;
cooccurrence[x][y].direction[i].green*=normalize;
cooccurrence[x][y].direction[i].blue*=normalize;
if (image->colorspace == CMYKColorspace)
cooccurrence[x][y].direction[i].index*=normalize;
if (image->matte != MagickFalse)
cooccurrence[x][y].direction[i].opacity*=normalize;
}
}
}
/*
Compute texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Angular second moment: measure of homogeneity of the image.
*/
channel_features[RedChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].red*
cooccurrence[x][y].direction[i].red;
channel_features[GreenChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].green*
cooccurrence[x][y].direction[i].green;
channel_features[BlueChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].blue*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].index*
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].opacity*
cooccurrence[x][y].direction[i].opacity;
/*
Correlation: measure of linear-dependencies in the image.
*/
sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum[y].direction[i].index+=cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
sum[y].direction[i].opacity+=cooccurrence[x][y].direction[i].opacity;
correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red;
correlation.direction[i].green+=x*y*
cooccurrence[x][y].direction[i].green;
correlation.direction[i].blue+=x*y*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
correlation.direction[i].index+=x*y*
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
correlation.direction[i].opacity+=x*y*
cooccurrence[x][y].direction[i].opacity;
/*
Inverse Difference Moment.
*/
channel_features[RedChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1);
channel_features[GreenChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1);
channel_features[BlueChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].index/((y-x)*(y-x)+1);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].opacity/((y-x)*(y-x)+1);
/*
Sum average.
*/
density_xy[y+x+2].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[y+x+2].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[y+x+2].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[y+x+2].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_xy[y+x+2].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
/*
Entropy.
*/
channel_features[RedChannel].entropy[i]-=
cooccurrence[x][y].direction[i].red*
log10(cooccurrence[x][y].direction[i].red+MagickEpsilon);
channel_features[GreenChannel].entropy[i]-=
cooccurrence[x][y].direction[i].green*
log10(cooccurrence[x][y].direction[i].green+MagickEpsilon);
channel_features[BlueChannel].entropy[i]-=
cooccurrence[x][y].direction[i].blue*
log10(cooccurrence[x][y].direction[i].blue+MagickEpsilon);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].entropy[i]-=
cooccurrence[x][y].direction[i].index*
log10(cooccurrence[x][y].direction[i].index+MagickEpsilon);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].entropy[i]-=
cooccurrence[x][y].direction[i].opacity*
log10(cooccurrence[x][y].direction[i].opacity+MagickEpsilon);
/*
Information Measures of Correlation.
*/
density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_x[x].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_x[x].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_y[y].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_y[y].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
}
mean.direction[i].red+=y*sum[y].direction[i].red;
sum_squares.direction[i].red+=y*y*sum[y].direction[i].red;
mean.direction[i].green+=y*sum[y].direction[i].green;
sum_squares.direction[i].green+=y*y*sum[y].direction[i].green;
mean.direction[i].blue+=y*sum[y].direction[i].blue;
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
{
mean.direction[i].index+=y*sum[y].direction[i].index;
sum_squares.direction[i].index+=y*y*sum[y].direction[i].index;
}
if (image->matte != MagickFalse)
{
mean.direction[i].opacity+=y*sum[y].direction[i].opacity;
sum_squares.direction[i].opacity+=y*y*sum[y].direction[i].opacity;
}
}
/*
Correlation: measure of linear-dependencies in the image.
*/
channel_features[RedChannel].correlation[i]=
(correlation.direction[i].red-mean.direction[i].red*
mean.direction[i].red)/(sqrt(sum_squares.direction[i].red-
(mean.direction[i].red*mean.direction[i].red))*sqrt(
sum_squares.direction[i].red-(mean.direction[i].red*
mean.direction[i].red)));
channel_features[GreenChannel].correlation[i]=
(correlation.direction[i].green-mean.direction[i].green*
mean.direction[i].green)/(sqrt(sum_squares.direction[i].green-
(mean.direction[i].green*mean.direction[i].green))*sqrt(
sum_squares.direction[i].green-(mean.direction[i].green*
mean.direction[i].green)));
channel_features[BlueChannel].correlation[i]=
(correlation.direction[i].blue-mean.direction[i].blue*
mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue-
(mean.direction[i].blue*mean.direction[i].blue))*sqrt(
sum_squares.direction[i].blue-(mean.direction[i].blue*
mean.direction[i].blue)));
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].correlation[i]=
(correlation.direction[i].index-mean.direction[i].index*
mean.direction[i].index)/(sqrt(sum_squares.direction[i].index-
(mean.direction[i].index*mean.direction[i].index))*sqrt(
sum_squares.direction[i].index-(mean.direction[i].index*
mean.direction[i].index)));
if (image->matte != MagickFalse)
channel_features[OpacityChannel].correlation[i]=
(correlation.direction[i].opacity-mean.direction[i].opacity*
mean.direction[i].opacity)/(sqrt(sum_squares.direction[i].opacity-
(mean.direction[i].opacity*mean.direction[i].opacity))*sqrt(
sum_squares.direction[i].opacity-(mean.direction[i].opacity*
mean.direction[i].opacity)));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=2; x < (ssize_t) (2*number_grays); x++)
{
/*
Sum average.
*/
channel_features[RedChannel].sum_average[i]+=
x*density_xy[x].direction[i].red;
channel_features[GreenChannel].sum_average[i]+=
x*density_xy[x].direction[i].green;
channel_features[BlueChannel].sum_average[i]+=
x*density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_average[i]+=
x*density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_average[i]+=
x*density_xy[x].direction[i].opacity;
/*
Sum entropy.
*/
channel_features[RedChannel].sum_entropy[i]-=
density_xy[x].direction[i].red*
log10(density_xy[x].direction[i].red+MagickEpsilon);
channel_features[GreenChannel].sum_entropy[i]-=
density_xy[x].direction[i].green*
log10(density_xy[x].direction[i].green+MagickEpsilon);
channel_features[BlueChannel].sum_entropy[i]-=
density_xy[x].direction[i].blue*
log10(density_xy[x].direction[i].blue+MagickEpsilon);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_entropy[i]-=
density_xy[x].direction[i].index*
log10(density_xy[x].direction[i].index+MagickEpsilon);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_entropy[i]-=
density_xy[x].direction[i].opacity*
log10(density_xy[x].direction[i].opacity+MagickEpsilon);
/*
Sum variance.
*/
channel_features[RedChannel].sum_variance[i]+=
(x-channel_features[RedChannel].sum_entropy[i])*
(x-channel_features[RedChannel].sum_entropy[i])*
density_xy[x].direction[i].red;
channel_features[GreenChannel].sum_variance[i]+=
(x-channel_features[GreenChannel].sum_entropy[i])*
(x-channel_features[GreenChannel].sum_entropy[i])*
density_xy[x].direction[i].green;
channel_features[BlueChannel].sum_variance[i]+=
(x-channel_features[BlueChannel].sum_entropy[i])*
(x-channel_features[BlueChannel].sum_entropy[i])*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_variance[i]+=
(x-channel_features[IndexChannel].sum_entropy[i])*
(x-channel_features[IndexChannel].sum_entropy[i])*
density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_variance[i]+=
(x-channel_features[OpacityChannel].sum_entropy[i])*
(x-channel_features[OpacityChannel].sum_entropy[i])*
density_xy[x].direction[i].opacity;
}
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Sum of Squares: Variance
*/
variance.direction[i].red+=(y-mean.direction[i].red+1)*
(y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red;
variance.direction[i].green+=(y-mean.direction[i].green+1)*
(y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green;
variance.direction[i].blue+=(y-mean.direction[i].blue+1)*
(y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].index+=(y-mean.direction[i].index+1)*
(y-mean.direction[i].index+1)*cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
variance.direction[i].opacity+=(y-mean.direction[i].opacity+1)*
(y-mean.direction[i].opacity+1)*
cooccurrence[x][y].direction[i].opacity;
/*
Sum average / Difference Variance.
*/
density_xy[MagickAbsoluteValue(y-x)].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[MagickAbsoluteValue(y-x)].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[MagickAbsoluteValue(y-x)].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_xy[MagickAbsoluteValue(y-x)].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
/*
Information Measures of Correlation.
*/
entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red*
log10(cooccurrence[x][y].direction[i].red+MagickEpsilon);
entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green*
log10(cooccurrence[x][y].direction[i].green+MagickEpsilon);
entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue*
log10(cooccurrence[x][y].direction[i].blue+MagickEpsilon);
if (image->colorspace == CMYKColorspace)
entropy_xy.direction[i].index-=cooccurrence[x][y].direction[i].index*
log10(cooccurrence[x][y].direction[i].index+MagickEpsilon);
if (image->matte != MagickFalse)
entropy_xy.direction[i].opacity-=
cooccurrence[x][y].direction[i].opacity*log10(
cooccurrence[x][y].direction[i].opacity+MagickEpsilon);
entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red*
log10(density_x[x].direction[i].red*density_y[y].direction[i].red+
MagickEpsilon));
entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green*
log10(density_x[x].direction[i].green*density_y[y].direction[i].green+
MagickEpsilon));
entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue*
log10(density_x[x].direction[i].blue*density_y[y].direction[i].blue+
MagickEpsilon));
if (image->colorspace == CMYKColorspace)
entropy_xy1.direction[i].index-=(
cooccurrence[x][y].direction[i].index*log10(
density_x[x].direction[i].index*density_y[y].direction[i].index+
MagickEpsilon));
if (image->matte != MagickFalse)
entropy_xy1.direction[i].opacity-=(
cooccurrence[x][y].direction[i].opacity*log10(
density_x[x].direction[i].opacity*density_y[y].direction[i].opacity+
MagickEpsilon));
entropy_xy2.direction[i].red-=(density_x[x].direction[i].red*
density_y[y].direction[i].red*log10(density_x[x].direction[i].red*
density_y[y].direction[i].red+MagickEpsilon));
entropy_xy2.direction[i].green-=(density_x[x].direction[i].green*
density_y[y].direction[i].green*log10(density_x[x].direction[i].green*
density_y[y].direction[i].green+MagickEpsilon));
entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue*
density_y[y].direction[i].blue*log10(density_x[x].direction[i].blue*
density_y[y].direction[i].blue+MagickEpsilon));
if (image->colorspace == CMYKColorspace)
entropy_xy2.direction[i].index-=(density_x[x].direction[i].index*
density_y[y].direction[i].index*log10(
density_x[x].direction[i].index*density_y[y].direction[i].index+
MagickEpsilon));
if (image->matte != MagickFalse)
entropy_xy2.direction[i].opacity-=(density_x[x].direction[i].opacity*
density_y[y].direction[i].opacity*log10(
density_x[x].direction[i].opacity*density_y[y].direction[i].opacity+
MagickEpsilon));
}
}
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].red;
channel_features[GreenChannel].variance_sum_of_squares[i]=
variance.direction[i].green;
channel_features[BlueChannel].variance_sum_of_squares[i]=
variance.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].index;
if (image->matte != MagickFalse)
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].opacity;
}
/*
Compute more texture features.
*/
(void) ResetMagickMemory(&variance,0,sizeof(variance));
(void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Difference variance.
*/
variance.direction[i].red+=density_xy[x].direction[i].red;
variance.direction[i].green+=density_xy[x].direction[i].green;
variance.direction[i].blue+=density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].index+=density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
variance.direction[i].opacity+=density_xy[x].direction[i].opacity;
sum_squares.direction[i].red+=density_xy[x].direction[i].red*
density_xy[x].direction[i].red;
sum_squares.direction[i].green+=density_xy[x].direction[i].green*
density_xy[x].direction[i].green;
sum_squares.direction[i].blue+=density_xy[x].direction[i].blue*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum_squares.direction[i].index+=density_xy[x].direction[i].index*
density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
sum_squares.direction[i].opacity+=density_xy[x].direction[i].opacity*
density_xy[x].direction[i].opacity;
/*
Difference entropy.
*/
channel_features[RedChannel].difference_entropy[i]-=
density_xy[x].direction[i].red*
log10(density_xy[x].direction[i].red+MagickEpsilon);
channel_features[GreenChannel].difference_entropy[i]-=
density_xy[x].direction[i].green*
log10(density_xy[x].direction[i].green+MagickEpsilon);
channel_features[BlueChannel].difference_entropy[i]-=
density_xy[x].direction[i].blue*
log10(density_xy[x].direction[i].blue+MagickEpsilon);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].difference_entropy[i]-=
density_xy[x].direction[i].index*
log10(density_xy[x].direction[i].index+MagickEpsilon);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].difference_entropy[i]-=
density_xy[x].direction[i].opacity*
log10(density_xy[x].direction[i].opacity+MagickEpsilon);
/*
Information Measures of Correlation.
*/
entropy_x.direction[i].red-=(density_x[x].direction[i].red*
log10(density_x[x].direction[i].red+MagickEpsilon));
entropy_x.direction[i].green-=(density_x[x].direction[i].green*
log10(density_x[x].direction[i].green+MagickEpsilon));
entropy_x.direction[i].blue-=(density_x[x].direction[i].blue*
log10(density_x[x].direction[i].blue+MagickEpsilon));
if (image->colorspace == CMYKColorspace)
entropy_x.direction[i].index-=(density_x[x].direction[i].index*
log10(density_x[x].direction[i].index+MagickEpsilon));
if (image->matte != MagickFalse)
entropy_x.direction[i].opacity-=(density_x[x].direction[i].opacity*
log10(density_x[x].direction[i].opacity+MagickEpsilon));
entropy_y.direction[i].red-=(density_y[x].direction[i].red*
log10(density_y[x].direction[i].red+MagickEpsilon));
entropy_y.direction[i].green-=(density_y[x].direction[i].green*
log10(density_y[x].direction[i].green+MagickEpsilon));
entropy_y.direction[i].blue-=(density_y[x].direction[i].blue*
log10(density_y[x].direction[i].blue+MagickEpsilon));
if (image->colorspace == CMYKColorspace)
entropy_y.direction[i].index-=(density_y[x].direction[i].index*
log10(density_y[x].direction[i].index+MagickEpsilon));
if (image->matte != MagickFalse)
entropy_y.direction[i].opacity-=(density_y[x].direction[i].opacity*
log10(density_y[x].direction[i].opacity+MagickEpsilon));
}
/*
Difference variance.
*/
channel_features[RedChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].red)-
(variance.direction[i].red*variance.direction[i].red))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[GreenChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].green)-
(variance.direction[i].green*variance.direction[i].green))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[BlueChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].blue)-
(variance.direction[i].blue*variance.direction[i].blue))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].opacity)-
(variance.direction[i].opacity*variance.direction[i].opacity))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].index)-
(variance.direction[i].index*variance.direction[i].index))/
((double) number_grays*number_grays*number_grays*number_grays);
/*
Information Measures of Correlation.
*/
channel_features[RedChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/
(entropy_x.direction[i].red > entropy_y.direction[i].red ?
entropy_x.direction[i].red : entropy_y.direction[i].red);
channel_features[GreenChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/
(entropy_x.direction[i].green > entropy_y.direction[i].green ?
entropy_x.direction[i].green : entropy_y.direction[i].green);
channel_features[BlueChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/
(entropy_x.direction[i].blue > entropy_y.direction[i].blue ?
entropy_x.direction[i].blue : entropy_y.direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].index-entropy_xy1.direction[i].index)/
(entropy_x.direction[i].index > entropy_y.direction[i].index ?
entropy_x.direction[i].index : entropy_y.direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].opacity-entropy_xy1.direction[i].opacity)/
(entropy_x.direction[i].opacity > entropy_y.direction[i].opacity ?
entropy_x.direction[i].opacity : entropy_y.direction[i].opacity);
channel_features[RedChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].red-
entropy_xy.direction[i].red)))));
channel_features[GreenChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].green-
entropy_xy.direction[i].green)))));
channel_features[BlueChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].blue-
entropy_xy.direction[i].blue)))));
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].index-
entropy_xy.direction[i].index)))));
if (image->matte != MagickFalse)
channel_features[OpacityChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].opacity-
entropy_xy.direction[i].opacity)))));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (i=0; i < 4; i++)
{
for (z=0; z < (ssize_t) number_grays; z++)
{
register ssize_t
y;
ChannelStatistics
pixel;
(void) ResetMagickMemory(&pixel,0,sizeof(pixel));
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Contrast: amount of local variations present in an image.
*/
if (((y-x) == z) || ((x-y) == z))
{
pixel.direction[i].red+=cooccurrence[x][y].direction[i].red;
pixel.direction[i].green+=cooccurrence[x][y].direction[i].green;
pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
pixel.direction[i].index+=cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
pixel.direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
}
/*
Maximum Correlation Coefficient.
*/
Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red*
cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/
density_y[x].direction[i].red;
Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green*
cooccurrence[y][x].direction[i].green/
density_x[z].direction[i].green/density_y[x].direction[i].red;
Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue*
cooccurrence[y][x].direction[i].blue/density_x[z].direction[i].blue/
density_y[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
Q[z][y].direction[i].index+=cooccurrence[z][x].direction[i].index*
cooccurrence[y][x].direction[i].index/
density_x[z].direction[i].index/density_y[x].direction[i].index;
if (image->matte != MagickFalse)
Q[z][y].direction[i].opacity+=
cooccurrence[z][x].direction[i].opacity*
cooccurrence[y][x].direction[i].opacity/
density_x[z].direction[i].opacity/
density_y[x].direction[i].opacity;
}
}
channel_features[RedChannel].contrast[i]+=z*z*pixel.direction[i].red;
channel_features[GreenChannel].contrast[i]+=z*z*pixel.direction[i].green;
channel_features[BlueChannel].contrast[i]+=z*z*pixel.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackChannel].contrast[i]+=z*z*
pixel.direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].contrast[i]+=z*z*
pixel.direction[i].opacity;
}
/*
Maximum Correlation Coefficient.
Future: return second largest eigenvalue of Q.
*/
channel_features[RedChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[GreenChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[BlueChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
}
/*
Relinquish resources.
*/
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
return(channel_features);
}
|
GB_unop__identity_uint32_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint32_uint16
// op(A') function: GB_unop_tran__identity_uint32_uint16
// C type: uint32_t
// A type: uint16_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = (uint32_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the IDENTITY unary operator, casting each
// uint16_t entry of A into the uint32_t output array Cx.  This is
// auto-generated code; the GB_* macros above pin down types and operator.
GrB_Info GB_unop_apply__identity_uint32_uint16
(
uint32_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this kernel was compiled out (see GB_DISABLE above); the caller must
// fall back to the generic apply path
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// A is not bitmap: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with identical types: a parallel memcpy suffices
GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
uint32_t z = (uint32_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions whose bitmap bit marks the entry as absent
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
uint32_t z = (uint32_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The worker loop lives in the shared template GB_unop_transpose.c, which
// is specialized here through the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_uint32_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// kernel compiled out; caller uses the generic transpose path
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
opencl_pgpwde_fmt_plug.c | /*
* Format for brute-forcing PGP WDE disk images.
*
* This software is Copyright (c) 2017 Dhiru Kholia <dhiru at openwall.net> and
* it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_pgpwde;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_pgpwde);
#else
#include <stdint.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "misc.h"
#include "aes.h"
#include "sha.h"
#include "common-opencl.h"
#include "options.h"
#include "pgpwde_common.h"
#define FORMAT_LABEL "pgpwde-opencl"
#define ALGORITHM_NAME "SHA1 OpenCL"
#define BINARY_SIZE 0
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#define PLAINTEXT_LENGTH 124
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1001
typedef struct {
uint32_t length;
uint8_t v[PLAINTEXT_LENGTH];
} pgpwde_password;
typedef struct {
uint8_t v[32];
} pgpwde_hash;
typedef struct {
uint32_t saltlen;
uint32_t bytes;
uint32_t key_len;
uint8_t salt[16];
} pgpwde_salt;
static int *cracked;
static int any_cracked;
static struct custom_salt *cur_salt;
static cl_int cl_error;
static pgpwde_password *inbuffer;
static pgpwde_hash *outbuffer;
static pgpwde_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
size_t insize, outsize, settingsize, cracked_size;
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl_autotune.h"
#include "memdbg.h"
static const char *warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* Upper bound on the OpenCL work-group size usable with crypt_kernel,
 * queried through the shared autotune helper. */
static size_t get_task_max_work_group_size()
{
return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
/* Allocate host buffers and OpenCL device buffers sized for gws candidate
 * passwords, and bind the device buffers to the kernel arguments.
 * Called by the autotuner for each global-work-size it probes. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
insize = sizeof(pgpwde_password) * gws;
outsize = sizeof(pgpwde_hash) * gws;
settingsize = sizeof(pgpwde_salt);
cracked_size = sizeof(*cracked) * gws;
/* host-side mirrors of the device buffers */
inbuffer = mem_calloc(1, insize);
outbuffer = mem_alloc(outsize);
cracked = mem_calloc(1, cracked_size);
// Allocate memory
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_setting =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem setting");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
/* wire the buffers to kernel arguments 0..2 (in, out, salt/settings) */
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
&mem_setting), "Error while setting mem_salt kernel argument");
}
/* Release everything create_clobj() allocated.  The `cracked` pointer
 * doubles as the "was create_clobj run?" flag, so this is safe to call
 * more than once. */
static void release_clobj(void)
{
if (cracked) {
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(inbuffer);
MEM_FREE(outbuffer);
MEM_FREE(cracked);
}
}
/* One-time format init: remember our fmt_main and prepare the chosen
 * OpenCL device.  Kernel build is deferred to reset(). */
static void init(struct fmt_main *_self)
{
self = _self;
opencl_prepare_dev(gpu_id);
}
/* Build the kernel (first call only) and run the shared auto-tuner to pick
 * local/global work sizes.  `autotuned` guards against re-running. */
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
/* pass the host-side plaintext limit into the kernel build */
snprintf(build_opts, sizeof(build_opts),
"-DPLAINTEXT_LENGTH=%d",
PLAINTEXT_LENGTH);
opencl_init("$JOHN/kernels/pgpwde_kernel.cl",
gpu_id, build_opts);
crypt_kernel = clCreateKernel(program[gpu_id], "pgpwde", &cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
create_clobj, release_clobj,
sizeof(pgpwde_password), 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, 1, 0, 300);
}
}
/* Tear down in reverse order of reset(): buffers, kernel, program. */
static void done(void)
{
if (autotuned) {
release_clobj();
HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
autotuned--;
}
}
/*
 * Stage the per-salt parameters into `currentsalt` and upload them to the
 * GPU-side settings buffer (mem_setting) consumed by the pgpwde kernel.
 *
 * Bug fix: the address-of expression had been corrupted by an HTML-entity
 * mangling ("&curren" -> U+00A4), yielding the non-existent identifier
 * `¤tsalt`; restored to `&currentsalt`.
 */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	currentsalt.bytes = cur_salt->bytes;
	/* NOTE saltlen and key_len are currently hard-coded in kernel, for speed */
	currentsalt.saltlen = 16;
	currentsalt.key_len = 32;
	memcpy((char*)currentsalt.salt, cur_salt->salt, currentsalt.saltlen);
	/* Non-blocking write; the in-order queue serializes it before launch. */
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
		"Copy setting to gpu");
}
#undef set_key
/* Store one candidate password in the host input buffer at `index`,
 * truncated to the kernel's fixed-size plaintext field. */
static void set_key(char *key, int index)
{
	size_t len = strlen(key);

	len = (len > PLAINTEXT_LENGTH) ? PLAINTEXT_LENGTH : len;
	memcpy(inbuffer[index].v, key, len);
	inbuffer[index].length = len;
}
/* Return the candidate stored at `index` as a NUL-terminated C string.
 * Uses a static buffer, so the result is only valid until the next call. */
static char *get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	uint32_t len = inbuffer[index].length;

	memcpy(out, inbuffer[index].v, len);
	out[len] = '\0';
	return out;
}
/* Hash one batch: upload keys, run the OpenCL kernel (S2K key derivation),
 * read the derived keys back, then verify each against the salt's encrypted
 * session key on the CPU.  Returns the number of candidates processed. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
size_t *lws = local_work_size ? &local_work_size : NULL;
/* reset per-batch crack state from the previous call */
if (any_cracked) {
memset(cracked, 0, cracked_size);
any_cracked = 0;
}
global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);
// Copy data to gpu
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
"Copy data to gpu");
// Run kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
NULL, &global_work_size, lws, 0, NULL,
multi_profilingEvent[1]),
"Run kernel");
// Read the result back (blocking read also flushes the queue)
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
outsize, outbuffer, 0, NULL, multi_profilingEvent[2]),
"Copy result back");
/* during auto-tuning we only time the GPU work; skip verification */
if (ocl_autotune_running)
return count;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
unsigned char key[40];
int ret = -1;
memcpy(key, outbuffer[index].v, 32);
/* CPU-side check: try to decrypt/verify the encrypted session key */
ret = pgpwde_decrypt_and_verify(key, cur_salt->esk, 128);
cracked[index] = (0 == ret);
if (ret == 0) {
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
return count;
}
/* Batch-level match test: crypt_all already verified every candidate, so a
 * single flag answers "did anything in this batch crack?". */
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
/* Per-candidate result recorded by crypt_all. */
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
/* Full verification already happened in crypt_all; nothing more to check. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* John the Ripper format descriptor: static parameters followed by the
 * method table wiring this plugin's functions into the core. */
struct fmt_main fmt_opencl_pgpwde = {
{
/* format parameters */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
pgpwde_tests,
},
{
/* method table; fmt_default_* are core no-op/stock implementations,
 * pgpwde_* come from pgpwde_common.h shared with the CPU format */
init,
done,
reset,
fmt_default_prepare,
pgpwde_valid,
fmt_default_split,
fmt_default_binary,
pgpwde_get_salt,
{
0
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
mssql12_fmt_plug.c | /* Modified in August, 2012 by Dhiru Kholia (dhiru at openwall.com) for MS SQL 2012
*
* This software is Copyright (c) 2010 bartavelle, <bartavelle at bandecon.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*
* Modified by Mathieu Perrin (mathieu at tpfh.org) 09/06
* Microsoft MS-SQL05 password cracker
*
* UTF-8 support by magnum 2011, same terms as above
*
* Creating MS SQL 2012 hashes:
*
* sqlcmd -L
* sqlcmd -S <server> -U sa -P <password>
* 1> select pwdencrypt("openwall")
* 2> go
*
* Dumping hashes from MS SQL server 2012:
*
* sqlcmd -S <server> -U sa -P <password>
* 1> select * from sys.sql_logins
* 2> go */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mssql12;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mssql12);
#else
#include <string.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "arch.h"
#include "misc.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "sha2.h"
#include "memdbg.h"
#define FORMAT_LABEL "mssql12"
#define FORMAT_NAME "MS SQL 2012/2014"
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "SHA512 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "SHA512 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 25
#define CIPHERTEXT_LENGTH 54 + 44 * 2
#define BINARY_SIZE 64
#define BINARY_ALIGN 4
#define SALT_SIZE 4
#define SALT_ALIGN 4
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#undef MIN
#define MIN(a, b) (((a) > (b)) ? (b) : (a))
static struct fmt_tests tests[] = {
{"0x0200F733058A07892C5CACE899768F89965F6BD1DED7955FE89E1C9A10E27849B0B213B5CE92CC9347ECCB34C3EFADAF2FD99BFFECD8D9150DD6AACB5D409A9D2652A4E0AF16", "Password1!"},
{"0x0200AB3E1F9028A739EEF62ABF672427276A32D5EDD349E638E7F2CD81DAA247CFE20EE4E3B0A30B2D0AE3C3FA010E61752F1BF45E045041F1B988C083C7F118527E3E5F0562", "openwall"},
/* hashes from https://hashcat.net/forum */
{"0x02006BF4AB05873FF0C8A4AFD1DC5912CBFDEF62E0520A3353B04E1184F05C873C9C76BBADDEAAC1E9948C7B6ABFFD62BFEFD7139F17F6AFE10BE0FEE7A178644623067C2423", "carlos"},
{"0x0200935819BA20F1C7289CFF2F8FF9F0E40DA5E6D04986F988CFE6603DA0D2BC0160776614763198967D603FBD8C103151A15E70D18E7B494C7F13F16804A7A4EB206084E632", "test"},
{"0x0200570AC969EF7C6CCB3312E8BEDE1D635EB852C06496957F0FA845B20FCD1C7C457474A5B948B68C47C2CB704D08978871F532C9EB11199BB5F56A06AC915C3799DB8A64C1", "test1"},
{"0x0200A56045DBCD848E297FA8D06E7579D62B7129928CA0BC5D232A7320972EF5A5455C01411B8D3A7FF3D18A55058A12FAEE5DA410AFE6CE61FF5C39E5FF57CD3EDD57DB1C3B", "test2"},
{"0x020059799F1B6D897BE2C5A76D3FFDC52B308190E82FA01F2FA51129B4863A7EE21B3FF6FE9F7850976045237805F338DD36DC9345B429F47A402614C6F2F2B02C56DF14C4F4", "Paul"},
{"0x0200881E2999DD8E3583695F405696257B99559953705A34D774C15AC1D42699BB77BC56DB5F657751335C1B350890E643790553B60329CAE7A2E7D3C04CF8856C4DB0058723", "DBAmaster"},
{"0x0200D648446E70180A6DFB6DF14DB38623EBFE490FE445751900FD5DC45A2B5D20D7AFFE8C6FFC2890BAE1AF34430A21F2F1E4DE50E25757FDB4789716D8D85C6985A00BC454", "database"},
{"0x02008AC3B9DC7B67EF9D3C1D25D8007A4B957D5BD61D71E5E9DA08D9F8F012EDDAD168E1CADD93D4627433FBFEE8BCF6CBB42D5B9A31886FC5FF7F970B164F4B5815E03D6DE7", "jhl9mqe5"},
{"0x020094C4D05A082DB1362B1A972C5D5F1C04C527090A7427E93C13AFEC705A011D8980E994FA647C7D44E25A427246218E25674571DB1710E49C713FB17129549C29E303086A", "coldfusion"},
{"0x0200B9BD5C85918D9BEE84417957618FBA1CB80B71E81550FAE09AD027B4089017CD6461D8EC9509873C2D5096CDBE8F16E4EFA9035C35F9F4917CE58DB99DC6836CEA7483A7", "sql2005"},
{NULL}
};
static unsigned char cursalt[SALT_SIZE];
static char (*saved_key)[(PLAINTEXT_LENGTH + 1) * 2 + SALT_SIZE];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / 4];
static int *key_length;
/* Accept a hash only if it has the exact expected length, starts with the
 * MS SQL 2012 version tag "0x0200", and is all-hex after the tag. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	int i;

	if (strlen(ciphertext) != CIPHERTEXT_LENGTH)
		return 0;
	if (strncmp(ciphertext, "0x0200", 6) != 0)
		return 0;
	for (i = 6; i < CIPHERTEXT_LENGTH; i++) {
		char c = ciphertext[i];
		int is_hex = (c >= '0' && c <= '9') ||
		             (c >= 'a' && c <= 'f') ||
		             (c >= 'A' && c <= 'F');
		if (!is_hex)
			return 0;
	}
	return 1;
}
/* Stash the 4-byte salt for use by crypt_all(). */
static void set_salt(void *salt)
{
memcpy(cursalt, salt, SALT_SIZE);
}
/* Decode the 4 salt bytes from hex at offset 6 of the ciphertext (right
 * after the "0x0200" tag).  Returns a static allocation; the core copies
 * the result, so reuse across calls is fine. */
static void * get_salt(char * ciphertext)
{
static unsigned char *out2;
int l;
if (!out2) out2 = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);
for(l=0;l<SALT_SIZE;l++)
{
/* two hex digits -> one byte, via the shared atoi16 lookup table */
out2[l] = atoi16[ARCH_INDEX(ciphertext[l*2+6])]*16
+ atoi16[ARCH_INDEX(ciphertext[l*2+7])];
}
return out2;
}
static void set_key_enc(char *_key, int index);
/* Format init: scale keys-per-crypt for OpenMP, allocate per-key arrays,
 * and select the codepage-aware set_key when the target encoding needs it. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_tiny(sizeof(*saved_key) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
key_length = mem_calloc_tiny(sizeof(*key_length) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
/* UTF-8 input can use up to 3 bytes per UCS-2 code point */
if (pers_opts.target_enc == UTF_8)
self->params.plaintext_length = MIN(125, PLAINTEXT_LENGTH * 3);
/* anything beyond raw ASCII/latin-1 needs the converting set_key */
if (pers_opts.target_enc != ISO_8859_1 &&
pers_opts.target_enc != ASCII)
self->methods.set_key = set_key_enc;
}
/* Fast path: widen each ASCII/ISO-8859-1 byte to a UTF-16LE code unit.
 * key_length ends up in BYTES (hence the final <<= 1), matching what
 * crypt_all feeds to SHA-512. */
static void set_key(char *_key, int index)
{
/* ASCII or ISO-8859-1 to UCS-2 */
UTF8 *s = (UTF8*)_key;
UTF16 *d = (UTF16*)saved_key[index];
for (key_length[index] = 0; s[key_length[index]]; key_length[index]++)
#if ARCH_LITTLE_ENDIAN
d[key_length[index]] = s[key_length[index]];
#else
/* big-endian host: shift so the stored bytes are still little-endian */
d[key_length[index]] = s[key_length[index]] << 8;
#endif
/* UTF-16 NUL terminator, then convert the count from chars to bytes */
d[key_length[index]] = 0;
key_length[index] <<= 1;
}
/* Conversion path for UTF-8 / legacy codepages: transcode into UTF-16 and
 * record the length in bytes.  A negative return from enc_to_utf16 means
 * the input was truncated/invalid; fall back to the converted length. */
static void set_key_enc(char *_key, int index)
{
/* UTF-8 or legacy codepage to UCS-2 */
key_length[index] = enc_to_utf16((UTF16*)saved_key[index], PLAINTEXT_LENGTH,
(unsigned char*)_key, strlen(_key));
if (key_length[index] < 0)
key_length[index] = strlen16((UTF16*)saved_key[index]);
/* chars -> bytes */
key_length[index] <<= 1;
}
/* Re-terminate the UTF-16 buffer (crypt_all may have appended the salt over
 * the terminator) and convert back to the session encoding for display. */
static char *get_key(int index) {
((UTF16*)saved_key[index])[key_length[index]>>1] = 0;
return (char*)utf16_to_enc((UTF16*)saved_key[index]);
}
/* Compare the target binary against all computed digests.  Without OpenMP
 * only index 0 is examined — presumably safe because keys-per-crypt stays
 * at 1 unless init() scales it under _OPENMP; verify if MAX_KEYS changes. */
static int cmp_all(void *binary, int count) {
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
return 1;
return 0;
}
/* Full 64-byte digest already compared; nothing further to verify. */
static int cmp_exact(char *source, int count) {
return (1);
}
/* Exact digest comparison for a single candidate. */
static int cmp_one(void * binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* Compute SHA512(UTF-16LE(password) || salt) for each candidate in the
 * batch.  Without OpenMP the braced body runs once with index 0 — the
 * standard JtR single-key idiom paired with the #ifdef'd loop header. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
SHA512_CTX ctx;
/* append the 4 salt bytes right after the UTF-16 key material */
memcpy(saved_key[index]+key_length[index], cursalt, SALT_SIZE);
SHA512_Init(&ctx );
SHA512_Update(&ctx, saved_key[index], key_length[index]+SALT_SIZE );
SHA512_Final((unsigned char *)crypt_out[index], &ctx);
}
return count;
}
/* Decode the 64-byte SHA-512 digest from hex starting at offset 14 of the
 * ciphertext (after "0x0200" + 8 salt hex digits).  Static allocation;
 * the core copies the result before the next call. */
static void * binary(char *ciphertext)
{
static char *realcipher;
int i;
if(!realcipher) realcipher = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
for(i=0;i<BINARY_SIZE;i++)
{
realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2+14])]*16 + atoi16[ARCH_INDEX(ciphertext[i*2+15])];
}
return (void *)realcipher;
}
/* get_hash_N: low bits of the first 32 bits of the digest, one mask per
 * core hash-table size (4 to 27 bits). */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
/* Bucket salts for the core's salt hash table. */
static int salt_hash(void *salt)
{
// The >> 8 gave much better distribution on a huge set I analysed
// although that was mssql05
return (*((ARCH_WORD_32 *)salt) >> 8) & (SALT_HASH_SIZE - 1);
}
/* John the Ripper format descriptor: static parameters followed by the
 * method table wiring this plugin's functions into the core. */
struct fmt_main fmt_mssql12 = {
{
/* format parameters */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, {
/* method table; fmt_default_* are stock core implementations */
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
|
DRB050-functionparameter-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Arrays passed as function parameters
*/
void foo1(double o1[], double c[], int len) {
/* Store 0.5 * c[i] into o1[i] for i in [0,len), offloaded to an OpenMP
target device.  o1 and c must each hold at least len doubles. */
int i;
/* AI1[] is a compiler-generated chain that computes the array-section
length used in the mapping clauses below; AI1[5] works out to
max(len, 0) elements. */
long long int AI1[6];
AI1[0] = len + -1;
AI1[1] = 8 * AI1[0];
AI1[2] = AI1[1] + 8;
AI1[3] = AI1[2] / 8;
AI1[4] = (AI1[3] > 0);
AI1[5] = (AI1[4] ? AI1[3] : 0);
/* Runtime aliasing test: RST_AI1 becomes nonzero when the address
ranges of o1 and c may overlap; the if(!RST_AI1) clause then disables
the device data mapping so the loop runs against host memory. */
char RST_AI1 = 0;
RST_AI1 |= !(((void*) (c + 0) > (void*) (o1 + AI1[5]))
|| ((void*) (o1 + 0) > (void*) (c + AI1[5])));
#pragma omp target data map(to: c[0:AI1[5]]) map(tofrom: o1[0:AI1[5]]) if(!RST_AI1)
{
#pragma omp target parallel for
for (i = 0; i < len; ++i) {
double volnew_o8 = 0.5 * c[i];
o1[i] = volnew_o8;
}
}
}
/* Global input/output arrays operated on by the offloaded loops. */
double o1[100];
double c[100];
int main() {
int i;
int len = 100;
/* Same compiler-generated aliasing test as in foo1(): RST_AI1 is
nonzero when the ranges o1[0:100] and c[0:100] may overlap or abut,
which disables the device mapping via the if(!RST_AI1) clause. */
char RST_AI1 = 0;
RST_AI1 |= !(((void*) (c + 0) > (void*) (o1 + 100))
|| ((void*) (o1 + 0) > (void*) (c + 100)));
#pragma omp target data map(tofrom: c[0:100],o1[0:100]) if(!RST_AI1)
{
/* Initialize both arrays on the device: c[i] = o1[i] = i + 1.01 */
#pragma omp target parallel for
for (i = 0; i < len; ++i) {
c[i] = i + 1.01;
o1[i] = i + 1.01;
}
}
/* o1[i] becomes 0.5 * c[i]; printed below for verification. */
foo1(o1, c, 100);
for (i = 0; i < len; ++i) {
printf("%lf\n", o1[i]);
}
return 0;
}
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image bluring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/channel.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
/*
Other global definitions used by module.
*/
/* Fold 'value' into a running minimum/maximum held in 'assign'.  Note
that MagickMin/MagickMax evaluate both arguments more than once, so the
arguments must be side-effect free. */
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
static inline size_t fact(size_t n)
{
  /* Compute n! as a product taken downward from n; 0! and 1! are 1.
     No overflow protection: n! exceeds a 64-bit size_t for n > 20. */
  size_t result = 1;
  size_t k;
  for (k = n; k > 1; k--)
    result *= k;
  return(result);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  /* Walk the singly-linked 'next' chain until its final element. */
  KernelInfo
    *last;

  for (last=kernel; last->next != (KernelInfo *) NULL; )
    last=last->next;
  return(last);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo method
% when you are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
/*
  ParseKernelArray(): parse a user-defined kernel specification string
  (either "WxH[+X+Y][@><]:v,v,v,..." or an old-style flat list of numbers
  forming an odd-sized square kernel) into a freshly allocated KernelInfo.
  'nan' or '-' entries mark values outside the kernel neighbourhood.
  Returns NULL on allocation failure; on any parse error the partially
  built kernel is destroyed and NULL is returned.  The caller owns the
  result and must free it with DestroyKernelInfo().
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;

  char
    token[MaxTextExtent];

  const char
    *p,
    *end;

  register ssize_t
    i;

  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */

  MagickStatusType
    flags;

  GeometryInfo
    args;

  kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (kernel == (KernelInfo *) NULL)
    return(kernel);
  (void) memset(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature = MagickCoreSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);

  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');

  /* clear flags - for expanding kernel lists through rotations */
  flags = NoValue;

  /* Has a ':' in argument - New user kernel specification
     FUTURE: this split on ':' could be done by StringToken()
  */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* BUG FIX: reject a geometry prefix too long for the token buffer;
         the unchecked memcpy() below previously overflowed
         token[MaxTextExtent] on oversized input. */
      if ((size_t) (p-kernel_string) >= MaxTextExtent)
        return(DestroyKernelInfo(kernel));

      /* ParseGeometry() needs the geometry separated! -- Arrgghh */
      memcpy(token, kernel_string, (size_t) (p-kernel_string));
      token[p-kernel_string] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);

      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 1.0;                /* then width = 1 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;

      /* Offset Handling and Checks */
      if ( args.xi < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
        : (ssize_t) (kernel->height-1)/2;
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));

      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        GetNextToken(p,&p,MaxTextExtent,token);
        if (*token == ',')
          GetNextToken(p,&p,MaxTextExtent,token);
      }
      /* set the size of the kernel - old sized square */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }

  /* Read in the kernel values from rest of input string argument */
  kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (double *) NULL)
    return(DestroyKernelInfo(kernel));
  kernel->minimum=MagickMaximumValue;
  kernel->maximum=(-MagickMaximumValue);
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    GetNextToken(p,&p,MaxTextExtent,token);
    if (*token == ',')
      GetNextToken(p,&p,MaxTextExtent,token);
    if ( LocaleCompare("nan",token) == 0
        || LocaleCompare("-",token) == 0 ) {
      kernel->values[i] = nan; /* this value is not part of neighbourhood */
    }
    else {
      kernel->values[i] = StringToDouble(token,(char **) NULL);
      ( kernel->values[i] < 0)
        ? ( kernel->negative_range += kernel->values[i] )
        : ( kernel->positive_range += kernel->values[i] );
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }
  }

  /* sanity check -- no more values in kernel definition */
  GetNextToken(p,&p,MaxTextExtent,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));

  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));

  /* check that we received at least one real (non-nan) value! */
  if (kernel->minimum == MagickMaximumValue)
    return(DestroyKernelInfo(kernel));

  if ( (flags & AreaValue) != 0 )          /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0);  /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 )  /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0);  /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )     /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);        /* 90 degree mirror rotate */

  return(kernel);
}
/*
  ParseKernelName(): parse a 'named' built-in kernel specification of the
  form "name[:geometry-args]" and generate it via AcquireKernelBuiltIn().
  Missing geometry arguments are filled with per-kernel defaults.  The
  '@', '>' and '<' geometry flags expand a single kernel into a rotated
  kernel list.  Returns NULL if the name is not a recognised built-in
  kernel or generation fails.  The caller owns the result and must free
  it with DestroyKernelInfo().
*/
static KernelInfo *ParseKernelName(const char *kernel_string)
{
  char
    token[MaxTextExtent];

  const char
    *p,
    *end;

  GeometryInfo
    args;

  KernelInfo
    *kernel;

  MagickStatusType
    flags;

  ssize_t
    type;

  /* Parse special 'named' kernel */
  GetNextToken(kernel_string,&p,MaxTextExtent,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *) NULL);  /* not a valid named kernel */

  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;

  end = strchr(p, ';'); /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');

  /* BUG FIX: a geometry argument string longer than the token buffer
     previously overflowed token[MaxTextExtent] in the memcpy() below. */
  if ((size_t) (end-p) >= MaxTextExtent)
    return((KernelInfo *) NULL);

  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  memcpy(token, p, (size_t) (end-p));
  token[end-p] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);

  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;  /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;     /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:  /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 3;                  /* then width = 3 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then height = width */
      if ( (flags & XValue) == 0 )     /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )           /* no distance scale */
        args.sigma = 100.0;                       /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )     /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )    /* '%' flag */
        args.sigma *= QuantumRange/100.0;         /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }
  return(kernel);
}
/*
  AcquireKernelInfo() converts the given (possibly ';'-separated) kernel
  specification string into a linked list of KernelInfo structures.  A
  NULL input yields a single empty user-defined kernel; a leading '@'
  reads the specification from the named file.  Returns NULL on failure.
  The caller owns the result and must free it with DestroyKernelInfo().
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
  KernelInfo
    *kernel,
    *new_kernel;

  char
    *kernel_cache,
    token[MaxTextExtent];

  const char
    *p;

  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* '@filename': load the kernel specification from a file */
      ExceptionInfo *exception=AcquireExceptionInfo();
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      exception=DestroyExceptionInfo(exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  while (GetNextToken(p,(const char **) NULL,MaxTextExtent,token), *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);
        if (new_kernel == (KernelInfo *) NULL)
          {
            /* parse failure: release everything acquired so far.
               BUG FIX: kernel_cache was previously leaked on this path. */
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            if (kernel_cache != (char *) NULL)
              kernel_cache=DestroyString(kernel_cache);
            return((KernelInfo *) NULL);
          }
        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }
    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returns one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% sever clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usally much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alturnative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimentional Pascel's Triangle
% of values. Used for special forma of image filters
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Lapacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is als at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernel. These 9
% kernels not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximataly 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeletion)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-conneected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeletion)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Tradional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a ressearch paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve conectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One why
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleving of Manhatten and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flys' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without loosing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Discrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(1,
sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this I don't know, but it appears to be based on the
* Error Function 'erf()' (integral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexican Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (< 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such, while weird, it is preferred.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivalent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (< 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Because of these two factors, Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less complex.
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +MagickSQ2;
kernel->values[5] = kernel->values[7]= -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19");
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +MagickSQ2;
kernel->values[7] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +MagickSQ2;
kernel->values[8] = -MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -MagickSQ2;
kernel->values[6] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along the axes to the given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along the diagonals to the given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>"));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>"));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunately we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but they retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;");
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo(
"ThinSE:41; ThinSE:42; ThinSE:43");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original.  The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */
  /* Detach the clone from the source's resources before any step that can
  ** fail.  Without this, the struct copy leaves new_kernel->next pointing at
  ** the CALLER'S next kernels, so the DestroyKernelInfo() on the error path
  ** below would walk and free the caller's own kernel list, causing a
  ** double free later.  The values pointer is cleared for the same reason.
  */
  new_kernel->next=(KernelInfo *) NULL;
  new_kernel->values=(double *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(double *) AcquireAlignedMemory(kernel->width,
    kernel->height*sizeof(*kernel->values));
  if (new_kernel->values == (double *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  /* Free an entire kernel list: walk the 'next' chain, releasing each
  ** node's value array and then the node itself.  Returns NULL (the
  ** result of relinquishing the final node) so callers can clear their
  ** pointer in one statement.
  */
  assert(kernel != (KernelInfo *) NULL);
  while (kernel != (KernelInfo *) NULL)
  {
    KernelInfo
      *next_kernel;

    next_kernel=kernel->next;
    kernel->values=(double *) RelinquishAlignedMemory(kernel->values);
    kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
    kernel=next_kernel;
  }
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels, but providing a reflected 180
% rotation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/* FlopKernelInfo() — horizontally mirror ("flop") a kernel in place by
** reversing each row of values and mirroring the origin's x offset.
**
** NOTE(review): this function is disabled (#if 0) and will not compile as
** written: 'angle' on the final assignment is not declared anywhere in this
** scope (it appears to be a leftover from a rotate routine).  Also,
** 'x < kernel->width/2' compares a signed index against an unsigned
** expression — verify before ever enabling this code.
*/
static void FlopKernelInfo(KernelInfo *kernel)
    { /* Do a Flop by reversing each row. */
      size_t
        y;
      register ssize_t
        x,r;
      register double
        *k,t;
      /* Swap values from both ends of each row, moving inward. */
      for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
        for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
          t=k[x], k[x]=k[r], k[r]=t;
      /* Mirror the origin offset across the kernel's vertical center line. */
      kernel->x = kernel->width - kernel->x - 1;
      angle = fmod(angle+180.0, 360.0);
    }
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /* Expand a single kernel into the 4-kernel mirror sequence by appending
  ** three rotated clones: a 180-degree flip, then a 90-degree transpose,
  ** then another 180-degree flop.  If any clone fails to allocate the
  ** expansion simply stops with the list built so far.
  */
  static const double
    mirror_angles[3] = { 180.0, 90.0, 180.0 };  /* flip, transpose, flop */

  KernelInfo
    *mirror,
    *tail;

  size_t
    i;

  tail=kernel;
  for (i=0; i < 3; i++)
  {
    mirror=CloneKernelInfo(tail);
    if (mirror == (KernelInfo *) NULL)
      return;
    RotateKernelInfo(mirror, mirror_angles[i]);
    LastKernelInfo(tail)->next=mirror;
    tail=mirror;
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels,
% while 90 degree rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    i;

  /* Geometry (size and origin offset) must agree before values matter. */
  if ( (kernel1->width != kernel2->width) ||
       (kernel1->height != kernel2->height) ||
       (kernel1->x != kernel2->x) ||
       (kernel1->y != kernel2->y) )
    return MagickFalse;
  /* Compare values element by element. */
  for (i=0; i < (kernel1->width*kernel1->height); i++) {
    const double
      v1 = kernel1->values[i],
      v2 = kernel2->values[i];

    /* A NaN ('don't care' element) is only equivalent to another NaN. */
    if ( IsNaN(v1) || IsNaN(v2) )
      {
        if ( (!IsNaN(v1)) != (!IsNaN(v2)) )
          return MagickFalse;
        continue;  /* both NaN: equivalent elements */
      }
    /* Values must match to within epsilon. */
    if ( fabs(v1-v2) >= MagickEpsilon )
      return MagickFalse;
  }
  return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  /* Append rotated copies of the kernel list, each rotated 'angle' degrees
  ** further, until a rotation reproduces the first kernel; the duplicate
  ** that closes the cycle is discarded.
  */
  KernelInfo
    *rotated,
    *tail;

  tail=kernel;
  DisableMSCWarning(4127)
  while (1) {
  RestoreMSCWarning
    rotated=CloneKernelInfo(tail);
    if (rotated == (KernelInfo *) NULL)
      break;  /* allocation failed: stop expanding */
    RotateKernelInfo(rotated,angle);
    if (SameKernelInfo(kernel,rotated) != MagickFalse)
      break;  /* full circle: this copy repeats the first kernel */
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
  if (rotated != (KernelInfo *) NULL)
    rotated=DestroyKernelInfo(rotated); /* kernel repeated - junk */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c K e r n e l M e t a D a t a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values.  This should only be used if it is not
% possible to calculate that meta-data in some easier way.
% calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  /* Recompute this kernel's minimum/maximum and negative/positive range
  ** meta-data from its value array, flushing near-zero values (within
  ** MagickEpsilon) to exactly zero as a side effect.  Only this kernel is
  ** updated, not the rest of the kernel list.
  */
  register size_t
    n;

  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
  {
    double
      value;

    value = kernel->values[n];
    if ( fabs(value) < MagickEpsilon )
      value = kernel->values[n] = 0.0;  /* flush tiny values to zero */
    if ( value < 0 )
      kernel->negative_range += value;
    else
      kernel->positive_range += value;
    Minimize(kernel->minimum, value);
    Maximize(kernel->maximum, value);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to as MorphologyImage() (see below) but
% without any user controls. This allows internal programs to use this
% function, to actually perform a specific task without possible interference
% by any API user supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine. The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ChannelType channel, const ssize_t iterations,
% const KernelInfo *kernel, const CompositeMethod compose,
% const double bias, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o channel: the channels to which the operations are applied
% The channel 'sync' flag determines if 'alpha weighting' is
% applied for convolution style operations.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Apply a Morphology Primative to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/
static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
CacheView
*p_view,
*q_view;
register ssize_t
i;
size_t
*changes,
changed,
virt_width;
ssize_t
y,
offx,
offy;
MagickBooleanType
status;
MagickOffsetType
progress;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(result_image != (Image *) NULL);
assert(result_image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=MagickTrue;
progress=0;
p_view=AcquireVirtualCacheView(image,exception);
q_view=AcquireAuthenticCacheView(result_image,exception);
virt_width=image->columns+kernel->width-1;
/* Some methods (including convolve) needs use a reflected kernel.
* Adjust 'origin' offsets to loop though kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
case IterativeDistanceMorphology:
/* kernel needs to used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
case ErodeMorphology:
case ErodeIntensityMorphology:
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
/* kernel is used as is, without reflection */
break;
default:
assert("Not a Primitive Morphology Method" != (char *) NULL);
break;
}
changed=0;
changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
sizeof(*changes));
if (changes == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
changes[i]=0;
if ( method == ConvolveMorphology && kernel->width == 1 )
{ /* Special handling (for speed) of vertical (blur) kernels.
** This performs its handling in columns rather than in rows.
** This is only done for convolve as it is the only method that
** generates very large 1-D vertical kernels (such as a 'BlurKernel')
**
** Timing tests (on single CPU laptop)
** Using a vertical 1-d Blue with normal row-by-row (below)
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.807u
** Using this column method
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.620u
**
** Anthony Thyssen, 14 June 2010
*/
register ssize_t
x;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,result_image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
y;
ssize_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view,x,-offy,1,image->rows+kernel->height-1,
exception);
q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
/* offset to origin in 'p'. while 'q' points to it directly */
r = offy;
for (y=0; y < (ssize_t) image->rows; y++)
{
DoublePixelPacket
result;
register ssize_t
v;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
/* Copy input image to the output image for unused channels
* This removes need for 'cloning' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+y,GetPixelIndex(p_indexes+y+r));
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
/* Weighted Average of pixels using reflected kernel
**
** NOTE for correct working of this operation for asymetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
*/
k = &kernel->values[ kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+y;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is simple greyscale channel operation
*/
for (v=0; v < (ssize_t) kernel->height; v++) {
if ( IsNaN(*k) ) continue;
result.red += (*k)*GetPixelRed(k_pixels);
result.green += (*k)*GetPixelGreen(k_pixels);
result.blue += (*k)*GetPixelBlue(k_pixels);
result.opacity += (*k)*GetPixelOpacity(k_pixels);
if ( image->colorspace == CMYKColorspace)
result.index += (*k)*(*k_indexes);
k--;
k_pixels++;
k_indexes++;
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+y,ClampToQuantum(result.index));
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
double
gamma; /* divisor, sum of color alpha weighting */
MagickRealType
alpha; /* alpha weighting for colors : alpha */
size_t
count; /* alpha values collected, number kernel values */
count=0;
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++) {
if ( IsNaN(*k) ) continue;
alpha=QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels));
count++; /* number of alpha values collected */
alpha*=(*k); /* include kernel weighting now */
gamma += alpha; /* normalize alpha weights only */
result.red += alpha*GetPixelRed(k_pixels);
result.green += alpha*GetPixelGreen(k_pixels);
result.blue += alpha*GetPixelBlue(k_pixels);
result.opacity += (*k)*GetPixelOpacity(k_pixels);
if ( image->colorspace == CMYKColorspace)
result.index += alpha*(*k_indexes);
k--;
k_pixels++;
k_indexes++;
}
/* Sync'ed channels, all channels are modified */
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height/count;
SetPixelRed(q,ClampToQuantum(gamma*result.red));
SetPixelGreen(q,ClampToQuantum(gamma*result.green));
SetPixelBlue(q,ClampToQuantum(gamma*result.blue));
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+y,ClampToQuantum(gamma*result.index));
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q))
|| ( p[r].green != GetPixelGreen(q))
|| ( p[r].blue != GetPixelBlue(q))
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+y+r) != GetPixelIndex(q_indexes+y))) )
changes[id]++;
p++;
q++;
} /* y */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* x */
result_image->type=image->type;
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
changed+=changes[i];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t) changed : 0);
}
/*
** Normal handling of horizontal or rectangular kernels (row by row)
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,result_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
x;
size_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
kernel->height, exception);
q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
/* offset to origin in 'p'. while 'q' points to it directly */
r = virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
DoublePixelPacket
result,
min,
max;
/* Copy input image to the output image for unused channels
* This removes need for 'cloning' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+x+r));
/* Defaults */
min.red =
min.green =
min.blue =
min.opacity =
min.index = (double) QuantumRange;
max.red =
max.green =
max.blue =
max.opacity =
max.index = 0.0;
/* default result is the original pixel value */
result.red = (double) p[r].red;
result.green = (double) p[r].green;
result.blue = (double) p[r].blue;
result.opacity = QuantumRange - (double) p[r].opacity;
result.index = 0.0;
if ( image->colorspace == CMYKColorspace)
result.index = (double) GetPixelIndex(p_indexes+x+r);
switch (method) {
case ConvolveMorphology:
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
break;
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
/* use a boolean flag indicating when first match found */
result.red = 0.0; /* result is not used otherwise */
break;
default:
break;
}
switch ( method ) {
case ConvolveMorphology:
/* Weighted Average of pixels using reflected kernel
**
** NOTE for correct working of this operation for asymmetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
**
** Correlation is actually the same as this but without reflecting
** the kernel, and thus 'lower-level' than Convolution.  However
** as Convolution is the more common method used, and it does not
** really cost us much in terms of processing to use a reflected
** kernel, so it is Convolution that is implemented.
**
** Correlation will have its kernel reflected before calling
** this function to do a Convolve.
**
** For more details of Correlation vs Convolution see
** http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is simple greyscale channel operation
*/
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
result.red += (*k)*k_pixels[u].red;
result.green += (*k)*k_pixels[u].green;
result.blue += (*k)*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index += (*k)*GetPixelIndex(k_indexes+u);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum((MagickRealType) result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum((MagickRealType) result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum((MagickRealType) result.blue));
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,ClampToQuantum((MagickRealType) result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
double
alpha, /* alpha weighting for colors : alpha */
gamma; /* divisor, sum of color alpha weighting */
size_t
count; /* alpha values collected, number kernel values */
count=0;
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
alpha=QuantumScale*(QuantumRange-k_pixels[u].opacity);
count++; /* number of alpha values collected */
alpha*=(*k); /* include kernel weighting now */
gamma += alpha; /* normalize alpha weights only */
result.red += alpha*k_pixels[u].red;
result.green += alpha*k_pixels[u].green;
result.blue += alpha*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index+=alpha*GetPixelIndex(k_indexes+u);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Sync'ed channels, all channels are modified */
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height*kernel->width/count;
SetPixelRed(q,ClampToQuantum((MagickRealType) (gamma*result.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (gamma*result.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (gamma*result.blue)));
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,ClampToQuantum((MagickRealType) (gamma*
result.index)));
}
break;
case ErodeMorphology:
/* Minimum Value within kernel neighbourhood
**
** NOTE that the kernel is not reflected for this operation!
**
** NOTE: in normal Greyscale Morphology, the kernel value should
** be added to the real value, this is currently not done, due to
** the nature of the boolean kernels being used.
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNaN(*k) || (*k) < 0.5 ) continue;
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index,(double) GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateMorphology:
/* Maximum Value within kernel neighbourhood
**
** NOTE for correct working of this operation for asymmetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
**
** NOTE: in normal Greyscale Morphology, the kernel value should
** be added to the real value, this is currently not done, due to
** the nature of the boolean kernels being used.
**
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) || (*k) < 0.5 ) continue;
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) GetPixelIndex(
k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
/* Minimum of Foreground Pixel minus Maximum of Background Pixels
**
** NOTE that the kernel is not reflected for this operation,
** and consists of both foreground and background pixel
** neighbourhoods, 0.0 for background, and 1.0 for foreground
** with either Nan or 0.5 values for don't care.
**
** Note that this will never produce a meaningless negative
** result. Such results can cause Thinning/Thicken to not work
** correctly when used against a greyscale image.
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNaN(*k) ) continue;
if ( (*k) > 0.7 )
{ /* minimum of foreground pixels */
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index,(double) GetPixelIndex(
k_indexes+u));
}
else if ( (*k) < 0.3 )
{ /* maximum of background pixels */
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) GetPixelIndex(
k_indexes+u));
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Pattern Match if difference is positive */
min.red -= max.red; Maximize( min.red, 0.0 );
min.green -= max.green; Maximize( min.green, 0.0 );
min.blue -= max.blue; Maximize( min.blue, 0.0 );
min.opacity -= max.opacity; Maximize( min.opacity, 0.0 );
min.index -= max.index; Maximize( min.index, 0.0 );
break;
case ErodeIntensityMorphology:
/* Select Pixel with Minimum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity.
**
** NOTE that the kernel is not reflected for this operation!
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNaN(*k) || (*k) < 0.5 ) continue;
if ( result.red == 0.0 ||
GetPixelIntensity(image,&(k_pixels[u])) < GetPixelIntensity(result_image,q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changes[id]++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateIntensityMorphology:
/* Select Pixel with Maximum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity (yet).
**
** NOTE for correct working of this operation for asymetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
if ( result.red == 0.0 ||
GetPixelIntensity(image,&(k_pixels[u])) > GetPixelIntensity(result_image,q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changes[id]++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case IterativeDistanceMorphology:
/* Work out an iterative distance from black edge of a white image
** shape. Essentially white values are decreased to the smallest
** 'distance from edge' it can find.
**
** It works by adding kernel values to the neighbourhood, and
** select the minimum value found. The kernel is rotated before
** use, so kernel distances match resulting distances, when a user
** provided asymmetric kernel is applied.
**
**
** This code is almost identical to True GrayScale Morphology But
** not quite.
**
** GreyDilate Kernel values added, maximum value found Kernel is
** rotated before use.
**
** GrayErode: Kernel values subtracted and minimum value found No
** kernel rotation used.
**
** Note the the Iterative Distance method is essentially a
** GrayErode, but with negative kernel values, and kernel
** rotation applied.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case UndefinedMorphology:
default:
break; /* Do nothing */
}
/* Final mathematics of results (combine with original image?)
**
** NOTE: Difference Morphology operators Edge* and *Hat could also
** be done here but works better with iteration as a image difference
** in the controlling function (below). Thicken and Thinning however
** should be done here so they can be iterated correctly.
*/
switch ( method ) {
case HitAndMissMorphology:
case ErodeMorphology:
result = min; /* minimum of neighbourhood */
break;
case DilateMorphology:
result = max; /* maximum of neighbourhood */
break;
case ThinningMorphology:
/* subtract pattern match from original */
result.red -= min.red;
result.green -= min.green;
result.blue -= min.blue;
result.opacity -= min.opacity;
result.index -= min.index;
break;
case ThickenMorphology:
/* Add the pattern matchs to the original */
result.red += min.red;
result.green += min.green;
result.blue += min.blue;
result.opacity += min.opacity;
result.index += min.index;
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case UndefinedMorphology:
case ConvolveMorphology:
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
break; /* full pixel was directly assigned - not a channel method */
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0
&& image->matte != MagickFalse )
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
changes[id]++;
p++;
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* y */
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
changed+=changes[i];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t)changed : -1);
}
/* This is almost identical to the MorphologyPrimative() function above,
** but will apply the primitive directly to the actual image using two
** passes, once in each direction, with the results of the previous (and
** current) row being re-used.
**
** That is after each row is 'Sync'ed' into the image, the next row will
** make use of those values as part of the calculation of the next row.
** It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function can not make use
** of multi-threaded, parallel processing.
*/
/* MorphologyPrimitiveDirect() applies one of the "direct" morphology
** primitives (DistanceMorphology or VoronoiMorphology) in-place on the
** given image, using two sweeps over the image: one top-down/left-right,
** then one bottom-up/right-left.  Each sweep re-uses pixel values already
** written earlier in the same sweep (both the previous rows and the
** already-processed part of the current row), which is what makes the
** iterative distance function converge in only two passes.
**
** Because each pixel depends on just-written neighbours, this function
** must NOT be multi-threaded (see "DO NOT THREAD THIS CODE!" below).
**
** Returns the number of pixels changed, or -1 on error.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method, const ChannelType channel,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  CacheView
    *auth_view,       /* authentic (writable) view into 'image' */
    *virt_view;       /* virtual (read, with edge handling) view into 'image' */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y, offx, offy;    /* offx/offy: kernel origin offsets (possibly reflected) */

  size_t
    changed,          /* running count of pixels modified */
    virt_width;       /* width of virtual row: image columns + kernel overhang */

  status=MagickTrue;
  changed=0;
  progress=0;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets to loop through the kernel as a reflection.
   */
  offx = kernel->x;
  offy = kernel->y;
  switch(method) {
    case DistanceMorphology:
    case VoronoiMorphology:
      /* kernel needs to be used with reflection about origin */
      offx = (ssize_t) kernel->width-offx-1;
      offy = (ssize_t) kernel->height-offy-1;
      break;
#if 0
    case ?????Morphology:
      /* kernel is used as is, without reflection */
      break;
#endif
    default:
      /* Any other method is a caller programming error: this assert
      ** always fires (the string literal is never a NULL pointer). */
      assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
      break;
  }

  /* DO NOT THREAD THIS CODE! */
  /* two views into same image (virtual, and actual) */
  virt_view=AcquireVirtualCacheView(image,exception);
  auth_view=AcquireAuthenticCacheView(image,exception);
  virt_width=image->columns+kernel->width-1;

  /* Pass 1: top-down, left-to-right */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register const IndexPacket
      *magick_restrict p_indexes;

    register PixelPacket
      *magick_restrict q;

    register IndexPacket
      *magick_restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;

    /* NOTE read virtual pixels, and authentic pixels, from the same image!
    ** we read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only top half of kernel is processed as we do a single pass downward
    ** through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      break;
    p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width, (size_t) offy+1,
      exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* offset to origin in 'p'. while 'q' points to it directly */
    r = (ssize_t) virt_width*offy + offx;

    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *magick_restrict k;          /* current kernel value (walked in reverse) */

      register const PixelPacket
        *magick_restrict k_pixels;   /* start of neighbourhood row under kernel */

      register const IndexPacket
        *magick_restrict k_indexes;

      MagickPixelPacket
        result;

      /* Starting Defaults: seed the result with the pixel's current value */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
          /* Add kernel Value and select the minimum value found. */
          /* Walk the kernel backwards from its last value (reflection),
          ** over the rows at and above the origin only. */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              Minimize(result.red, (*k)+k_pixels[u].red);
              Minimize(result.green, (*k)+k_pixels[u].green);
              Minimize(result.blue, (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row:
          ** the part of the origin row to the LEFT of the current pixel,
          ** read from the authentic buffer 'q' already written this pass. */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue;  /* off the edge! */
            if ( IsNaN(*k) ) continue;
            Minimize(result.red, (*k)+k_pixels[u].red);
            Minimize(result.green, (*k)+k_pixels[u].green);
            Minimize(result.blue, (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to 'Matte' channel, while copying the color
          ** values of the closest pixel.
          **
          ** This is experimental, and really the 'alpha' component should
          ** be a completely separate 'masking' channel so that alpha can
          ** also be used as part of the results.
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  /* closer pixel found: take its whole color */
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue;  /* off the edge! */
            if ( IsNaN(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          /* whole pixel (color + index) assigned, no channel selection */
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels: compare the kernel-origin source pixel
      ** p[r] against the value just written into q */
      if ( ( p[r].red != GetPixelRed(q) )
          || ( p[r].green != GetPixelGreen(q) )
          || ( p[r].blue != GetPixelBlue(q) )
          || ( (image->matte != MagickFalse) &&
               (p[r].opacity != GetPixelOpacity(q)))
          || ( (image->colorspace == CMYKColorspace) &&
               (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
        changed++;  /* The pixel was changed in some way! */
      p++;  /* increment pixel buffers */
      q++;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        if (SetImageProgress(image,MorphologyTag,progress,image->rows) == MagickFalse )
          status=MagickFalse;
      }
  } /* y */

  /* Do the reversed pass through the image (Pass 2: bottom-up,
  ** right-to-left), so distances propagate from all directions. */
  for (y=(ssize_t)image->rows-1; y >= 0; y--)
  {
    register const PixelPacket
      *magick_restrict p;

    register const IndexPacket
      *magick_restrict p_indexes;

    register PixelPacket
      *magick_restrict q;

    register IndexPacket
      *magick_restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;

    if (status == MagickFalse)
      break;
    /* NOTE read virtual pixels, and authentic pixels, from the same image!
    ** we read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only the bottom half of the kernel will be processed as we move
    ** up the image.
    */
    p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, (size_t) kernel->y+1,
      exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* adjust positions to end of row (we scan right-to-left) */
    p += image->columns-1;
    q += image->columns-1;

    /* offset to origin in 'p'. while 'q' points to it directly */
    r = offx;

    for (x=(ssize_t)image->columns-1; x >= 0; x--)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *magick_restrict k;

      register const PixelPacket
        *magick_restrict k_pixels;

      register const IndexPacket
        *magick_restrict k_indexes;

      MagickPixelPacket
        result;

      /* Default - previously modified pixel (result of pass 1) */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
          /* Add kernel Value and select the minimum value found. */
          /* rows at and below the origin, kernel walked in reverse */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              Minimize(result.red, (*k)+k_pixels[u].red);
              Minimize(result.green, (*k)+k_pixels[u].green);
              Minimize(result.blue, (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row:
          ** the part of the origin row to the RIGHT of the current pixel */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;  /* off the edge! */
            if ( IsNaN(*k) ) continue;
            Minimize(result.red, (*k)+k_pixels[u].red);
            Minimize(result.green, (*k)+k_pixels[u].green);
            Minimize(result.blue, (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to 'Matte' channel, copying the closest color.
          **
          ** This is experimental, and really the 'alpha' component should
          ** be a completely separate 'masking' channel.
          */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
            if ( IsNaN(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels */
      if ( ( p[r].red != GetPixelRed(q) )
          || ( p[r].green != GetPixelGreen(q) )
          || ( p[r].blue != GetPixelBlue(q) )
          || ( (image->matte != MagickFalse) &&
               (p[r].opacity != GetPixelOpacity(q)))
          || ( (image->colorspace == CMYKColorspace) &&
               (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
        changed++;  /* The pixel was changed in some way! */
      p--;  /* go backward through pixel buffers */
      q--;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        if ( SetImageProgress(image,MorphologyTag,progress,image->rows) == MagickFalse )
          status=MagickFalse;
      }
  } /* y */

  auth_view=DestroyCacheView(auth_view);
  virt_view=DestroyCacheView(virt_view);
  /* -1 signals failure; otherwise the count of modified pixels */
  return(status ? (ssize_t) changed : -1);
}
/* Apply a Morphology by calling one of the above low level primitive
** application functions. This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that is based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and raw low-level implementation (above).
*/
MagickExport Image *MorphologyApply(const Image *image, const ChannelType
channel,const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,
const double bias, ExceptionInfo *exception)
{
CompositeOperator
curr_compose;
Image
*curr_image, /* Image we are working with or iterating */
*work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
KernelInfo
*reflected_kernel, /* A reflected copy of the kernel (if needed) */
*norm_kernel, /* the current normal un-reflected kernel */
*rflt_kernel, /* the current reflected kernel (if needed) */
*this_kernel; /* the kernel being applied */
MorphologyMethod
primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
stage_loop, /* Loop 3: primitive loop for compound morphology */
stage_limit, /* how many primitives are in this compound */
kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
ssize_t
changed; /* number pixels changed by last primitive operation */
char
v_info[MaxTextExtent];
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *) NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative interations = infinite (well alomst) */
kernel_limit = image->columns>image->rows ? image->columns : image->rows;
verbose = IsMagickTrue(GetImageArtifact(image,"debug"));
/* initialise for cleanup */
curr_image = (Image *) image;
curr_compose = image->compose;
(void) curr_compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
* + which loop should use the given iteratations
* + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
stage_limit = 1; /* assume method is not a compound */
special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case EdgeMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
/* FALL THUR */
case ThinningMorphology:
case ThickenMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
case DistanceMorphology:
case VoronoiMorphology:
special = MagickTrue; /* use special direct primative */
break;
default:
break;
}
/* Apply special methods with special requirments
** For example, single run only, or post-processing requirements
*/
if ( special != MagickFalse )
{
rslt_image=CloneImage(image,0,0,MagickTrue,exception);
if (rslt_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse)
{
InheritException(exception,&rslt_image->exception);
goto error_cleanup;
}
changed = MorphologyPrimitiveDirect(rslt_image, method,
channel, kernel, exception);
if ( verbose != MagickFalse )
(void) (void) FormatLocaleFile(stderr,
"%s:%.20g.%.20g #%.20g => Changed %.20g\n",
CommandOptionToMnemonic(MagickMorphologyOptions, method),
1.0,0.0,1.0, (double) changed);
if ( changed < 0 )
goto error_cleanup;
if ( method == VoronoiMorphology ) {
/* Preserve the alpha channel of input image - but turned off */
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
(void) CompositeImageChannel(rslt_image, DefaultChannels,
CopyOpacityCompositeOp, image, 0, 0);
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
}
goto exit_cleanup;
}
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
/* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case SmoothMorphology:
reflected_kernel = CloneKernelInfo(kernel);
if (reflected_kernel == (KernelInfo *) NULL)
goto error_cleanup;
RotateKernelInfo(reflected_kernel,180);
break;
default:
break;
}
/* Loops around more primitive morpholgy methods
** erose, dilate, open, close, smooth, edge, etc...
*/
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( method_loop < method_limit && method_changed > 0 ) {
method_loop++;
method_changed = 0;
/* Loop 2: iterate over each kernel in a multi-kernel list */
norm_kernel = (KernelInfo *) kernel;
this_kernel = (KernelInfo *) kernel;
rflt_kernel = reflected_kernel;
kernel_number = 0;
while ( norm_kernel != NULL ) {
/* Loop 3: Compound Morphology Staging - Select Primative to apply */
stage_loop = 0; /* the compound morphology stage number */
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
/* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dialate */
case TopHatMorphology: /* open and image difference */
primitive = ErodeMorphology;
if ( stage_loop == 2 )
primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
if ( stage_loop == 2 )
primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
primitive = DilateMorphology;
break;
case 3: /* Reflect kernel a close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
/* A Correlation is a Convolution with a reflected kernel.
** However a Convolution is a weighted sum using a reflected
** kernel. It may seem stange to convert a Correlation into a
** Convolution as the Correlation is the simplier method, but
** Convolution is much more commonly used, and it makes sense to
** implement it directly so as to avoid the need to duplicate the
** kernel when it is not required (which is typically the
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ConvolveMorphology;
break;
default:
break;
}
assert( this_kernel != (KernelInfo *) NULL );
/* Extra information for debugging compound operations */
if ( verbose != MagickFalse ) {
if ( stage_limit > 1 )
(void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
else if ( primitive != method )
(void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
/* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
/* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
if (work_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(work_image,DirectClass) == MagickFalse)
{
InheritException(exception,&work_image->exception);
goto error_cleanup;
}
/* work_image->type=image->type; ??? */
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
changed = MorphologyPrimitive(curr_image, work_image, primitive,
channel, this_kernel, bias, exception);
if ( verbose != MagickFalse ) {
if ( kernel_loop > 1 )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
(void) (void) FormatLocaleFile(stderr,
"%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
if ( changed < 0 )
goto error_cleanup;
kernel_changed += changed;
method_changed += changed;
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
curr_image = tmp;
}
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
} /* End Loop 4: Iterate the kernel with primitive */
if ( verbose != MagickFalse && kernel_changed != (size_t)changed )
(void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
if ( verbose != MagickFalse && stage_loop < stage_limit )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */
#if 0
(void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
(void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
(void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
(void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
(void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif
} /* End Loop 3: Primative (staging) Loop for Coumpound Methods */
/* Final Post-processing for some Compound Methods
**
** The removal of any 'Sync' channel flag in the Image Compositon
** below ensures the methematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** Turn off SVG composition 'alpha blending'.
*/
switch( method ) {
case EdgeOutMorphology:
case EdgeInMorphology:
case TopHatMorphology:
case BottomHatMorphology:
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr,
"\n%s: Difference with original image",
CommandOptionToMnemonic(MagickMorphologyOptions,method));
(void) CompositeImageChannel(curr_image,(ChannelType)
(channel & ~SyncChannels),DifferenceCompositeOp,image,0,0);
break;
case EdgeMorphology:
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr,
"\n%s: Difference of Dilate and Erode",
CommandOptionToMnemonic(MagickMorphologyOptions,method));
(void) CompositeImageChannel(curr_image,(ChannelType)
(channel & ~SyncChannels),DifferenceCompositeOp,save_image,0,0);
save_image = DestroyImage(save_image); /* finished with save image */
break;
default:
break;
}
/* multi-kernel handling: re-iterate, or compose results */
if ( kernel->next == (KernelInfo *) NULL )
rslt_image = curr_image; /* just return the resulting image */
else if ( rslt_compose == NoCompositeOp )
{ if ( verbose != MagickFalse ) {
if ( this_kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " (re-iterate)");
else
(void) FormatLocaleFile(stderr, " (done)");
}
rslt_image = curr_image; /* return result, and re-iterate */
}
else if ( rslt_image == (Image *) NULL)
{ if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, " (save for compose)");
rslt_image = curr_image;
curr_image = (Image *) image; /* continue with original image */
}
else
{ /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Compositon
** below ensures the methematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** IE: Turn off SVG composition 'alpha blending'.
*/
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, " (compose \"%s\")",
CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImageChannel(rslt_image,
(ChannelType) (channel & ~SyncChannels), rslt_compose,
curr_image, 0, 0);
curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, "\n");
/* loop to the next kernel in a multi-kernel list */
norm_kernel = norm_kernel->next;
if ( rflt_kernel != (KernelInfo *) NULL )
rflt_kernel = rflt_kernel->next;
kernel_number++;
} /* End Loop 2: Loop over each kernel */
} /* End Loop 1: compound method interation */
goto exit_cleanup;
/* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
if ( curr_image == rslt_image )
curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
if ( curr_image == rslt_image || curr_image == image )
curr_image = (Image *) NULL;
if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
if ( save_image != (Image *) NULL )
save_image = DestroyImage(save_image);
if ( reflected_kernel != (KernelInfo *) NULL )
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImageChannel() applies a user supplied kernel to the image
% according to the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
%   * Output Bias for Convolution and correlation ("-bias"
%     or "-define convolve:bias=??")
%   * Kernel Scale/normalize settings ("-set 'option:convolve:scale'")
%     This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-set option:showKernel 1")
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% Image *MorphologyImageChannel(const Image *image, const ChannelType
% channel,MorphologyMethod method,const ssize_t iterations,
% KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  /* Convenience wrapper: apply the morphology method to the default
  ** channel selection, delegating all work to MorphologyImageChannel(). */
  return(MorphologyImageChannel(image,DefaultChannels,method,iterations,
    kernel,exception));
}
MagickExport Image *MorphologyImageChannel(const Image *image,
  const ChannelType channel,const MorphologyMethod method,
  const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
  KernelInfo
    *curr_kernel;

  CompositeOperator
    compose;

  double
    bias;

  Image
    *morphology_image;

  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This is done BEFORE the ShowKernelInfo() function is called so that
   * users can see the results of the 'option:convolve:scale' option.
   */
  curr_kernel = (KernelInfo *) kernel;
  bias=image->bias;
  if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
    {
      const char
        *artifact;

      /* User supplied output bias ("-bias" / "-define convolve:bias=...") */
      artifact = GetImageArtifact(image,"convolve:bias");
      if (artifact != (const char *) NULL)
        bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);

      /* User supplied kernel scaling ("-set option:convolve:scale ...").
       * Clone the caller's (const) kernel first so it is never modified. */
      artifact = GetImageArtifact(image,"convolve:scale");
      if ( artifact != (const char *) NULL ) {
        if ( curr_kernel == kernel )
          curr_kernel = CloneKernelInfo(kernel);
        if (curr_kernel == (KernelInfo *) NULL)
          return((Image *) NULL);  /* FIX: clone failed - there is nothing to
                                    * destroy; the old code passed the NULL
                                    * pointer to DestroyKernelInfo() */
        ScaleGeometryKernelInfo(curr_kernel, artifact);
      }
    }
  /* display the (normalized) kernel via stderr */
  if ( IsMagickTrue(GetImageArtifact(image,"showKernel"))
    || IsMagickTrue(GetImageArtifact(image,"convolve:showKernel"))
    || IsMagickTrue(GetImageArtifact(image,"morphology:showKernel")) )
    ShowKernelInfo(curr_kernel);
  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  { const char
      *artifact;
    compose = UndefinedCompositeOp;  /* use default for method */
    artifact = GetImageArtifact(image,"morphology:compose");
    if ( artifact != (const char *) NULL)
      compose = (CompositeOperator) ParseCommandOption(
        MagickComposeOptions,MagickFalse,artifact);
  }
  /* Apply the Morphology */
  morphology_image = MorphologyApply(image, channel, method, iterations,
    curr_kernel, compose, bias, exception);
  /* Cleanup and Exit: only destroy the kernel if we cloned it above */
  if ( curr_kernel != kernel )
    curr_kernel=DestroyKernelInfo(curr_kernel);
  return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* Rotate the kernel (and every kernel after it in a multi-kernel list)
  ** by the given angle, in place.  Only multiples of 90 degrees (square or
  ** 1-D kernels) and 45 degrees (3x3 kernels) are actually performed; the
  ** angle is progressively reduced as each partial rotation is applied.
  */

  /* angle the lower kernels first (recurse down the multi-kernel list) */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle into the range [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }

  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle: cycle the eight outer
          ** values one position around the ring; the center (index 4) is
          ** untouched. */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin: map the (x,y) offset, taken relative
          ** to the center cell (1,1), one 45-degree step around the ring */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
                 if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          /* track which direction the 90 degree step effectively went */
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);    /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees: a four-way cyclic
          ** swap of mirrored positions, working inward ring by ring. */
          { register size_t
              i,j,x,y;

            register double
              *k,t;

            k=kernel->values;
            for( i=0, x=kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=kernel->height-1;  j<y;   j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      double
        t;

      register double
        *k;

      size_t
        i,
        j;

      /* reverse the flattened value array in place */
      k=kernel->values;
      for ( i=0, j=kernel->width*kernel->height-1;  i<j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);   /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const double scaling_factor,const MagickStatusType normalize_flags)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    geometry_info;

  GeometryFlags
    geometry_flags;

  /* Parse the user supplied geometry string, typically from the
  ** "-set option:convolve:scale {geometry}" setting. */
  SetGeometryInfo(&geometry_info);
  geometry_flags = (GeometryFlags) ParseGeometry(geometry, &geometry_info);

  /* A percent flag scales both arguments from percentages */
  if ( (geometry_flags & PercentValue) != 0 )
    {
      geometry_info.rho *= 0.01;
      geometry_info.sigma *= 0.01;
    }

  /* Supply defaults for any argument the user omitted */
  if ( (geometry_flags & RhoValue) == 0 )
    geometry_info.rho = 1.0;
  if ( (geometry_flags & SigmaValue) == 0 )
    geometry_info.sigma = 0.0;

  /* Scale/Normalize the input kernel per the first argument and flags */
  ScaleKernelInfo(kernel, geometry_info.rho, geometry_flags);

  /* Add a scaled Unity Kernel, for blending with the original image */
  if ( (geometry_flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, geometry_info.sigma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
% will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/brackground
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  register ssize_t
    i;

  register double
    pos_scale,
    neg_scale;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel: derive the divisor from the kernel's
   * pre-computed positive/negative range sums. */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel: normalize by the positive half only */
      pos_scale = kernel->positive_range;
  }
  /* Force kernel into a normalized zero-summing kernel: positive and
   * negative values each get their own independent divisor. */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
                  ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
                  ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  /* scale every (non-nan) value by the sign-appropriate factor */
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if ( ! IsNaN(kernel->values[i]) )
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;  /* BUG FIX: was '= 1', which discarded the saved
                           * maximum instead of completing the swap */
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a users 'showKernel' option request.
%
% The format of the ShowKernelInfo method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ShowKernelInfo(const KernelInfo *kernel)
{
  const KernelInfo
    *curr;

  size_t
    number,   /* position of 'curr' within the kernel list */
    index,    /* running offset into the flattened value array */
    column,
    row;

  /* Walk the multi-kernel list, describing each kernel in turn. */
  number = 0;
  for (curr = kernel; curr != (KernelInfo *) NULL; curr = curr->next)
  {
    /* Header line: kernel number (multi-kernel lists only), type, angle */
    (void) FormatLocaleFile(stderr, "Kernel");
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) number );
    (void) FormatLocaleFile(stderr, " \"%s",
          CommandOptionToMnemonic(MagickKernelOptions, curr->type) );
    if ( fabs(curr->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", curr->angle);

    /* Geometry, origin and value range summary */
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
          curr->width,(unsigned long) curr->height,(long) curr->x,(long) curr->y);
    (void) FormatLocaleFile(stderr,
          " with values from %.*lg to %.*lg\n",
          GetMagickPrecision(), curr->minimum,
          GetMagickPrecision(), curr->maximum);
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
          GetMagickPrecision(), curr->negative_range,
          GetMagickPrecision(), curr->positive_range);
    if ( fabs(curr->positive_range+curr->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(curr->positive_range+curr->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
          GetMagickPrecision(), curr->positive_range+curr->negative_range);

    /* Dump the kernel values, one row per line ('nan' for unset slots) */
    index = 0;
    for (row = 0; row < curr->height; row++)
    {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) row );
      for (column = 0; column < curr->width; column++, index++)
      {
        if ( IsNaN(curr->values[index]) )
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
              GetMagickPrecision(), curr->values[index]);
      }
      (void) FormatLocaleFile(stderr,"\n");
    }
    number++;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n a l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
%     void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *k;

  /* For every kernel in the multi-kernel list... */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
  {
    /* ...add the scaled unity kernel: a single 'scale' value at the kernel
    ** origin, which in effect blends that fraction of the original image
    ** into the convolution result. */
    k->values[k->x+k->y*k->width] += scale;
    CalcKernelMetaData(k);  /* recalculate the meta-data */
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simply
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
  KernelInfo
    *k;

  size_t
    count,
    n;

  /* Replace every special 'nan' value with zero, in each kernel of the
  ** multi-kernel list (iterative walk rather than recursion). */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
  {
    count = k->width*k->height;
    for (n = 0; n < count; n++)
      if ( IsNaN(k->values[n]) )
        k->values[n] = 0.0;
  }
}
|
bfs.c | /* Copyright (C) 2010-2011 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
/* */
/* Originally a traditional top-down BFS routine                           */
/* */
/* */
/* Modified by June Wu */
/* Modified on 05/08/2017 */
/* */
/* */
/* A Hybrid Breadth-First Search is implemented using 1D partitioning of */
/* vertices and two queues. A bitmap is used to indicate which vertices */
/* have been visited. Messages are sent and processed asynchronously */
/* using non-blocking send and receive to overlap computation with */
/* communication. */
/* */
/* */
/* The variables: */
/* The total number of MPI tasks: size */
/* The total number of vertices: g.nglobalverts */
/* The total number of vertices a process is currently holding: nlocalverts */
/* Current frontier: g_oldq */
/* Next Level frontier: g_newq */
#include "common.h"
#include "oned_csr.h"
#include <mpi.h>
#include <math.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
static oned_csr_graph g;
static int64_t* g_oldq;
static int64_t* g_newq;
static unsigned long* g_visited;
static const int coalescing_size = 256;
static int64_t* g_outgoing;
static size_t* g_outgoing_counts /* 2x actual count */;
static MPI_Request* g_outgoing_reqs;
static int* g_outgoing_reqs_active;
static int64_t* g_recvbuf;
int64_t* global_oldq;
unsigned long* bottom_up_visited;
int* counts;
int* displs;
int64_t* localverts;
int64_t* notvisited;
void make_graph_data_structure(const tuple_graph* const tg) {
  /* Build the distributed 1-D CSR graph from the edge tuples. */
  convert_graph_to_oned_csr(tg, &g);
  const size_t vertex_count = g.nlocalverts;

  /* Current and next-level frontier queues (one slot per local vertex). */
  g_oldq = (int64_t*)xmalloc(vertex_count * sizeof(int64_t));
  g_newq = (int64_t*)xmalloc(vertex_count * sizeof(int64_t));

  /* Visited bitmap: one bit per local vertex, packed into unsigned longs. */
  const int bits_per_word = sizeof(unsigned long) * CHAR_BIT;
  int64_t bitmap_words = (vertex_count + bits_per_word - 1) / bits_per_word;
  g_visited = (unsigned long*)xmalloc(bitmap_words * sizeof(unsigned long));

  /* Per-destination coalescing buffers plus send-request bookkeeping. */
  g_outgoing = (int64_t*)xMPI_Alloc_mem(coalescing_size * size * 2 * sizeof(int64_t));
  g_outgoing_counts = (size_t*)xmalloc(size * sizeof(size_t)) /* 2x actual count */;
  g_outgoing_reqs = (MPI_Request*)xmalloc(size * sizeof(MPI_Request));
  g_outgoing_reqs_active = (int*)xmalloc(size * sizeof(int));

  /* Receive buffer for incoming (target, source) vertex pairs. */
  g_recvbuf = (int64_t*)xMPI_Alloc_mem(coalescing_size * 2 * sizeof(int64_t));
}
void free_graph_data_structure(void) {
  /* Release all globals allocated for the BFS runs.  free(NULL) is a
  ** no-op, so pointers that were never allocated are safe to pass. */

  /* Helper arrays used by the bottom-up / gather phases. */
  free(counts);
  free(displs);
  free(global_oldq);
  free(localverts);
  free(notvisited);

  /* Frontier queues and visited bitmap. */
  free(g_oldq);
  free(g_newq);
  free(g_visited);

  /* MPI communication buffers and request bookkeeping; memory obtained
  ** with MPI_Alloc_mem must go back through MPI_Free_mem. */
  MPI_Free_mem(g_outgoing);
  free(g_outgoing_counts);
  free(g_outgoing_reqs);
  free(g_outgoing_reqs_active);
  MPI_Free_mem(g_recvbuf);

  /* Finally release the CSR graph itself. */
  free_oned_csr_graph(&g);
}
/* This BFS fills in the pred[] array only; it does not produce a depth
** (level) map, so tell the harness not to expect one. */
int bfs_writes_depth_map(void) {
  return 0;
}
/* This is the hybrid level-synchronized BFS */
void run_bfs(int64_t root, int64_t* pred, int SCALE) {
const size_t nlocalverts = g.nlocalverts;
/* Set up the queues. */
int64_t* oldq = g_oldq;
int64_t* newq = g_newq;
size_t oldq_count = 0;
size_t newq_count = 0;
size_t global_newq_count = 0;
/* Set up two visited bitmap. */
/* One for bottom-up rountine, one for top-down routine. */
const int ulong_bits = sizeof(unsigned long) * CHAR_BIT;
int64_t visited_size = (nlocalverts + ulong_bits - 1) / ulong_bits;
unsigned long* visited = g_visited;
memset(visited, 0, visited_size * sizeof(unsigned long));
#define SET_VISITED(v) do {visited[VERTEX_LOCAL((v)) / ulong_bits] |= (1UL << (VERTEX_LOCAL((v)) % ulong_bits));} while (0)
#define TEST_VISITED(v) ((visited[VERTEX_LOCAL((v)) / ulong_bits] & (1UL << (VERTEX_LOCAL((v)) % ulong_bits))) != 0)
/* Set up buffers for message coalescing, MPI requests, etc. for
* communication. */
const int coalescing_size = 256;
int64_t* outgoing = g_outgoing;
size_t* outgoing_counts = g_outgoing_counts;
MPI_Request* outgoing_reqs = g_outgoing_reqs;
int* outgoing_reqs_active = g_outgoing_reqs_active;
memset(outgoing_reqs_active, 0, size * sizeof(int));
int64_t* recvbuf = g_recvbuf;
MPI_Request recvreq;
int recvreq_active = 0;
/* Termination counter for each level: this variable counts the number of
* ranks that have said that they are done sending to me in the current
* level. This rank can stop listening for new messages when it reaches
* size. */
int num_ranks_done;
/* Set all vertices to "not visited." */
{size_t i; for (i = 0; i < nlocalverts; ++i) pred[i] = -1;}
/* Mark the root and put it into the queue. */
if (VERTEX_OWNER(root) == rank) {
SET_VISITED(root);
pred[VERTEX_LOCAL(root)] = root;
oldq[oldq_count++] = root;
}
/* Creat an array that holds locally owned vertices */
localverts = (int64_t*)xmalloc(nlocalverts * sizeof(int64_t));
int64_t i = 0;
for(int p = 0; p < g.nglobalverts; p++){
if(VERTEX_OWNER(p) == rank){
localverts[i] = p;
i++;
}
}
#define CHECK_MPI_REQS \
/* Check all MPI requests and handle any that have completed. */ \
do { \
/* Test for incoming vertices to put onto the queue. */ \
while (recvreq_active) { \
int flag; \
MPI_Status st; \
MPI_Test(&recvreq, &flag, &st); \
if (flag) { \
recvreq_active = 0; \
int count; \
MPI_Get_count(&st, MPI_INT64_T, &count); \
/* count == 0 is a signal from a rank that it is done sending to me
* (using MPI's non-overtaking rules to keep that signal after all
* "real" messages. */ \
if (count == 0) { \
++num_ranks_done; \
} else { \
int j; \
for (j = 0; j < count; j += 2) { \
int64_t tgt = recvbuf[j]; \
int64_t src = recvbuf[j + 1]; \
/* Process one incoming edge. */ \
assert (VERTEX_OWNER(tgt) == rank); \
if (!TEST_VISITED(tgt)){\
SET_VISITED(tgt); \
pred[VERTEX_LOCAL(tgt)] = src; \
newq[newq_count++] = tgt; \
} \
} \
} \
/* Restart the receive if more messages will be coming. */ \
if (num_ranks_done < size) { \
MPI_Irecv(recvbuf, coalescing_size * 2, MPI_INT64_T, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &recvreq); \
recvreq_active = 1; \
} \
} else break; \
} \
/* Mark any sends that completed as inactive so their buffers can be
* reused. */ \
int c; \
for (c = 0; c < size; ++c) { \
if (outgoing_reqs_active[c]) { \
int flag; \
MPI_Test(&outgoing_reqs[c], &flag, MPI_STATUS_IGNORE); \
if (flag) outgoing_reqs_active[c] = 0; \
} \
} \
} while (0)
while (1) {
/* Gather the number of vertices in the next frontier */
MPI_Allreduce(&oldq_count, &global_newq_count, 1, MPI_INT64_T, MPI_SUM, MPI_COMM_WORLD);
/* The criteria to choose between the top-down or the bottom-up is whether
* the next frontier contains more or less than the half of the size of the total
* number of vertices. If the next frontier has less than half of the size,
* use top-down BFS; if the next frontier has more than half of the size,
* then use bottom-up. */
/* start of the top-down BFS */
if (global_newq_count < pow(2,SCALE-1)){
memset(outgoing_counts, 0, size * sizeof(size_t));
num_ranks_done = 1; /* I never send to myself, so I'm always done */
/* Start the initial receive. */
if (num_ranks_done < size) {
MPI_Irecv(recvbuf, coalescing_size * 2, MPI_INT64_T, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &recvreq);
recvreq_active = 1;
}
/* Step through the current level's queue. */
size_t i;
for (i = 0; i < oldq_count; ++i) {
CHECK_MPI_REQS;
assert (VERTEX_OWNER(oldq[i]) == rank);
assert (pred[VERTEX_LOCAL(oldq[i])] >= 0 && pred[VERTEX_LOCAL(oldq[i])] < g.nglobalverts);
int64_t src = oldq[i];
/* iterate through its incident edges. */
size_t j, j_end = g.rowstarts[VERTEX_LOCAL(oldq[i]) + 1];
for (j = g.rowstarts[VERTEX_LOCAL(oldq[i])]; j < j_end; ++j) {
int64_t tgt = g.column[j];
int owner = VERTEX_OWNER(tgt);
if (owner == rank) {
if (!TEST_VISITED(tgt)) {
SET_VISITED(tgt);
pred[VERTEX_LOCAL(tgt)] = src;
newq[newq_count++] = tgt;
}
} else {
while (outgoing_reqs_active[owner]) CHECK_MPI_REQS; /* Wait for buffer to be available */
size_t c = outgoing_counts[owner];
outgoing[owner * coalescing_size * 2 + c] = tgt;
outgoing[owner * coalescing_size * 2 + c + 1] = src;
outgoing_counts[owner] += 2;
if (outgoing_counts[owner] == coalescing_size * 2) {
MPI_Isend(&outgoing[owner * coalescing_size * 2], coalescing_size * 2, MPI_INT64_T, owner, 0, MPI_COMM_WORLD, &outgoing_reqs[owner]);
outgoing_reqs_active[owner] = 1;
outgoing_counts[owner] = 0;
}
}
}
}
/* Flush any coalescing buffers that still have messages. */
int offset;
for (offset = 1; offset < size; ++offset) {
int dest = MOD_SIZE(rank + offset);
if (outgoing_counts[dest] != 0) {
while (outgoing_reqs_active[dest]) CHECK_MPI_REQS;
MPI_Isend(&outgoing[dest * coalescing_size * 2], outgoing_counts[dest], MPI_INT64_T, dest, 0, MPI_COMM_WORLD, &outgoing_reqs[dest]);
outgoing_reqs_active[dest] = 1;
outgoing_counts[dest] = 0;
}
/* Wait until all sends to this destination are done. */
while (outgoing_reqs_active[dest]) CHECK_MPI_REQS;
/* Tell the destination that we are done sending to them. */
MPI_Isend(&outgoing[dest * coalescing_size * 2], 0, MPI_INT64_T, dest, 0, MPI_COMM_WORLD, &outgoing_reqs[dest]); /* Signal no more sends */
outgoing_reqs_active[dest] = 1;
while (outgoing_reqs_active[dest]) CHECK_MPI_REQS;
}
/* Wait until everyone else is done (and thus couldn't send us any more
* messages). */
while (num_ranks_done < size) CHECK_MPI_REQS;
/* Test globally if all queues are empty. */
global_newq_count;
MPI_Allreduce(&newq_count, &global_newq_count, 1, MPI_INT64_T, MPI_SUM, MPI_COMM_WORLD);
/* Quit if they all are empty. */
if (global_newq_count == 0) break;
/* Swap old and new queues; clear new queue for next level. */
{int64_t* temp = oldq; oldq = newq; newq = temp;}
oldq_count = newq_count;
newq_count = 0;
} /* end of the top-down BFS*/
/* start of the bottom-up BFS */
else{
memset(outgoing_counts, 0, size * sizeof(size_t));
num_ranks_done = 1; /* I never send to myself, so I'm always done */
/*Get information about the number of vertices in the current frontier from all MPI tasks */
counts = (int*)xmalloc(size * sizeof(int));
displs = (int*)xmalloc(size * sizeof(int));
int oldq_count_int = oldq_count;
MPI_Allgather(&oldq_count_int, 1, MPI_INT, counts, 1, MPI_INT, MPI_COMM_WORLD);
displs[0] = 0;
for (int i = 1; i < size; i++){
displs[i] = displs[i-1]+counts[i-1];
}
/* Start a new global queue */
global_oldq = (int64_t*)xmalloc(global_newq_count * sizeof(int64_t));
/* Gather the global new frontier */
MPI_Allgatherv(oldq, oldq_count, MPI_INT64_T, global_oldq, counts, displs, MPI_INT64_T, MPI_COMM_WORLD);
/* Start the initial receive. */
if (num_ranks_done < size) {
MPI_Irecv(recvbuf, coalescing_size * 2, MPI_INT64_T, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &recvreq);
recvreq_active = 1;
}
/* Step through every locally owned vertices. */
size_t i;
int64_t tgt;
int64_t src;
/* Step through all vertices in global frontier */
for (int64_t k = 0; k < global_newq_count; k++){
src = global_oldq[k];
for (i = 0; i < nlocalverts; i++) {
CHECK_MPI_REQS;
/* Check if the current local vertex has been visited. */
if (!TEST_VISITED(localverts[i])){
//printf("\n vertex %lld has not been visited \n", localverts[i]);
/*If not visited, step through its incident edges. */
size_t j, j_end = g.rowstarts[VERTEX_LOCAL(localverts[i]) + 1];
for (j = g.rowstarts[VERTEX_LOCAL(localverts[i])]; j < j_end; ++j) {
/* Check any of the locally owned vertices share the same edge with global frontier */
int owner = VERTEX_OWNER(global_oldq[k]);
tgt = localverts[i];
if (g.column[j] == src){
SET_VISITED(tgt);
newq[newq_count++] = tgt;
/* Update the visited map, predecessor map and next-level queue locally */
pred[VERTEX_LOCAL(tgt)] = src;
}
} /* end of checking whether shared edges are detected */
} /* end of stepping through all incident edges */
} /* end of step through all locally owned vertices */
} /* end of stepping through all vertices in global frontier */
/* Flush any coalescing buffers that still have messages. */
int offset;
for (offset = 1; offset < size; ++offset) {
int dest = MOD_SIZE(rank + offset);
if (outgoing_counts[dest] != 0) {
while (outgoing_reqs_active[dest]) CHECK_MPI_REQS;
MPI_Isend(&outgoing[dest * coalescing_size * 2], outgoing_counts[dest], MPI_INT64_T, dest, 0, MPI_COMM_WORLD, &outgoing_reqs[dest]);
outgoing_reqs_active[dest] = 1;
outgoing_counts[dest] = 0;
}
/* Wait until all sends to this destination are done. */
while (outgoing_reqs_active[dest]) CHECK_MPI_REQS;
/* Tell the destination that we are done sending to them. */
MPI_Isend(&outgoing[dest * coalescing_size * 2], 0, MPI_INT64_T, dest, 0, MPI_COMM_WORLD, &outgoing_reqs[dest]); /* Signal no more sends */
outgoing_reqs_active[dest] = 1;
while (outgoing_reqs_active[dest]) CHECK_MPI_REQS;
}
/* Wait until everyone else is done (and thus couldn't send us any more
* messages). */
while (num_ranks_done < size) CHECK_MPI_REQS;
/* Test globally if all queues are empty. */
global_newq_count;
MPI_Allreduce(&newq_count, &global_newq_count, 1, MPI_INT64_T, MPI_SUM, MPI_COMM_WORLD);
/* Quit if they all are empty. */
if (global_newq_count == 0) break;
/* Swap old and new queues; clear new queue for next level. */
{int64_t* temp = oldq; oldq = newq; newq = temp;}
oldq_count = newq_count;
newq_count = 0;
} /* end of the bottom-up BFS */
} /* end of the while loop that goes through entire graph */
#undef CHECK_MPI_REQS
} /* end of the run_bfs() */
/* Decompose a batch of global vertex IDs into (owner rank, local index)
 * pairs for the predecessor-validation code.  Each output array must have
 * room for count entries. */
void get_vertex_distribution_for_pred(size_t count, const int64_t* vertex_p, int* owner_p, size_t* local_p) {
  const int64_t* restrict verts = vertex_p;
  int* restrict owners = owner_p;
  size_t* restrict locals = local_p;
  ptrdiff_t idx;
  /* Each element is independent, so the loop parallelizes trivially. */
#pragma omp parallel for
  for (idx = 0; idx < (ptrdiff_t)count; ++idx) {
    owners[idx] = VERTEX_OWNER(verts[idx]);
    locals[idx] = VERTEX_LOCAL(verts[idx]);
  }
}
/* Inverse of the owner/local decomposition: rebuild the global vertex ID
 * from a (rank, local index) pair. */
int64_t vertex_to_global_for_pred(int v_rank, size_t v_local) {
return VERTEX_TO_GLOBAL(v_rank, v_local);
}
/* Number of vertices owned by this rank, taken from the global graph g. */
size_t get_nlocalverts_for_pred(void) {
return g.nlocalverts;
}
|
//    |  /           |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \.
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY)
#define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY
// System includes
// External includes
// Project includes
#include "includes/define.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/builtin_timer.h"
//default builder and solver
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedNewtonRaphsonStrategy
* @ingroup KratosCore
* @brief This is the base Newton Raphson strategy
* @details This strategy iterates until the convergence is achieved (or the maximum number of iterations is surpassed) using a Newton Raphson algorithm
* @author Riccardo Rossi
*/
template <class TSparseSpace,
class TDenseSpace, // = DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedNewtonRaphsonStrategy
: public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;
// Counted pointer of ClassName
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedNewtonRaphsonStrategy);
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TSchemeType TSchemeType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
*/
explicit ResidualBasedNewtonRaphsonStrategy() : BaseType()
{
// Intentionally empty: nothing is configured here; callers are expected to
// assign scheme/criteria/builder-and-solver (or use Create) before solving.
}
/**
* @brief Constructor taking only the model part; the default parameters are used.
* @param rModelPart The model part of the problem
*/
explicit ResidualBasedNewtonRaphsonStrategy(ModelPart& rModelPart)
: ResidualBasedNewtonRaphsonStrategy(rModelPart, ResidualBasedNewtonRaphsonStrategy::GetDefaultParameters())
{
// Delegates to the (ModelPart, Parameters) constructor with default settings.
}
/**
* @brief Default constructor. (with parameters)
* @param rModelPart The model part of the problem
* @param ThisParameters The configuration parameters
*/
explicit ResidualBasedNewtonRaphsonStrategy(ModelPart& rModelPart, Parameters ThisParameters)
: BaseType(rModelPart),
mSolutionStepIsInitialized(false),
mInitializeWasPerformed(false),
mKeepSystemConstantDuringIterations(false)
{
// Validate and assign defaults. This must happen before any member flag is
// read below, since AssignSettings is what initializes them.
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
// Getting builder and solver
auto p_builder_and_solver = GetBuilderAndSolver();
if (p_builder_and_solver != nullptr) {
// Tells to the builder and solver if the reactions have to be Calculated or not
p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag);
// Tells to the Builder And Solver if the system matrix and vectors need to
// be reshaped at each step or not
p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
} else {
KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "BuilderAndSolver is not initialized. Please assign one before settings flags" << std::endl;
}
// Allocate empty system containers; they are sized later in InitializeSolutionStep.
mpA = TSparseSpace::CreateEmptyMatrixPointer();
mpDx = TSparseSpace::CreateEmptyVectorPointer();
mpb = TSparseSpace::CreateEmptyVectorPointer();
}
/**
* Default constructor
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewLinearSolver The linear solver employed
* @param pNewConvergenceCriteria The convergence criteria employed
* @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem
* @param CalculateReactions The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
* @param MoveMeshFlag The flag that allows to move the mesh
*/
explicit ResidualBasedNewtonRaphsonStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
int MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false)
: BaseType(rModelPart, MoveMeshFlag),
mpScheme(pScheme),
mpConvergenceCriteria(pNewConvergenceCriteria),
mReformDofSetAtEachStep(ReformDofSetAtEachStep),
mCalculateReactionsFlag(CalculateReactions),
mSolutionStepIsInitialized(false),
mMaxIterationNumber(MaxIterations),
mInitializeWasPerformed(false),
mKeepSystemConstantDuringIterations(false)
{
KRATOS_TRY;
// Setting up the default builder and solver: a block builder and solver
// wrapping the given linear solver, since no builder and solver was supplied.
mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer(
new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSolver));
// Tells to the builder and solver if the reactions have to be Calculated or not
mpBuilderAndSolver->SetCalculateReactionsFlag(mCalculateReactionsFlag);
// Tells to the Builder And Solver if the system matrix and vectors need to
// be reshaped at each step or not
mpBuilderAndSolver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
// Set EchoLevel to the default value (only time is displayed)
SetEchoLevel(1);
// By default the matrices are rebuilt at each iteration
this->SetRebuildLevel(2);
// Allocate empty system containers; they are sized in InitializeSolutionStep.
mpA = TSparseSpace::CreateEmptyMatrixPointer();
mpDx = TSparseSpace::CreateEmptyVectorPointer();
mpb = TSparseSpace::CreateEmptyVectorPointer();
KRATOS_CATCH("");
}
/**
* @brief Constructor specifying the builder and solver
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewConvergenceCriteria The convergence criteria employed
* @param pNewBuilderAndSolver The builder and solver employed
* @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem
* @param CalculateReactions The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
* @param MoveMeshFlag The flag that allows to move the mesh
*/
explicit ResidualBasedNewtonRaphsonStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
int MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false)
: BaseType(rModelPart, MoveMeshFlag),
mpScheme(pScheme),
mpBuilderAndSolver(pNewBuilderAndSolver),
mpConvergenceCriteria(pNewConvergenceCriteria),
mReformDofSetAtEachStep(ReformDofSetAtEachStep),
mCalculateReactionsFlag(CalculateReactions),
mSolutionStepIsInitialized(false),
mMaxIterationNumber(MaxIterations),
mInitializeWasPerformed(false),
mKeepSystemConstantDuringIterations(false)
{
KRATOS_TRY
// Getting builder and solver (the one supplied by the caller)
auto p_builder_and_solver = GetBuilderAndSolver();
// Tells to the builder and solver if the reactions have to be Calculated or not
p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag);
// Tells to the Builder And Solver if the system matrix and vectors need to
//be reshaped at each step or not
p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
// Set EchoLevel to the default value (only time is displayed)
SetEchoLevel(1);
// By default the matrices are rebuilt at each iteration
this->SetRebuildLevel(2);
// Allocate empty system containers; they are sized in InitializeSolutionStep.
mpA = TSparseSpace::CreateEmptyMatrixPointer();
mpDx = TSparseSpace::CreateEmptyVectorPointer();
mpb = TSparseSpace::CreateEmptyVectorPointer();
KRATOS_CATCH("")
}
/**
* @brief Constructor specifying the builder and solver
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewLinearSolver The linear solver employed
* @param pNewConvergenceCriteria The convergence criteria employed
* @param pNewBuilderAndSolver The builder and solver employed
* @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem
* @param CalculateReactions The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
* @param MoveMeshFlag The flag that allows to move the mesh
*/
KRATOS_DEPRECATED_MESSAGE("Constructor deprecated, please use the constructor without linear solver")
explicit ResidualBasedNewtonRaphsonStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
int MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false)
: ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag)
{
KRATOS_TRY
KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "This constructor is deprecated, please use the constructor without linear solver" << std::endl;
// Getting builder and solver
auto p_builder_and_solver = GetBuilderAndSolver();
// We check if the linear solver considered for the builder and solver is consistent
auto p_linear_solver = p_builder_and_solver->GetLinearSystemSolver();
// NOTE(review): the message wording suggests a fallback, but KRATOS_ERROR_IF
// aborts on mismatch — confirm the intended severity.
KRATOS_ERROR_IF(p_linear_solver != pNewLinearSolver) << "Inconsistent linear solver in strategy and builder and solver. Considering the linear solver assigned to builder and solver :\n" << p_linear_solver->Info() << "\n instead of:\n" << pNewLinearSolver->Info() << std::endl;
KRATOS_CATCH("")
}
/**
* Constructor with Parameters
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewLinearSolver The linear solver employed
* @param pNewConvergenceCriteria The convergence criteria employed
* @param Settings Settings used in the strategy
*/
ResidualBasedNewtonRaphsonStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
Parameters Settings)
: BaseType(rModelPart),
mpScheme(pScheme),
mpConvergenceCriteria(pNewConvergenceCriteria),
mSolutionStepIsInitialized(false),
mInitializeWasPerformed(false),
mKeepSystemConstantDuringIterations(false)
{
KRATOS_TRY;
// Validate and assign defaults. FIX: previously Settings was silently ignored
// and the member flags read below (mCalculateReactionsFlag,
// mReformDofSetAtEachStep) were used uninitialized. This mirrors the
// (ModelPart, Parameters) constructor above.
Settings = this->ValidateAndAssignParameters(Settings, this->GetDefaultParameters());
this->AssignSettings(Settings);
// Setting up the default builder and solver wrapping the given linear solver
mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer(
new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSolver));
// Tells to the builder and solver if the reactions have to be Calculated or not
mpBuilderAndSolver->SetCalculateReactionsFlag(mCalculateReactionsFlag);
// Tells to the Builder And Solver if the system matrix and vectors need to
// be reshaped at each step or not
mpBuilderAndSolver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
// Set EchoLevel to the default value (only time is displayed)
SetEchoLevel(1);
// By default the matrices are rebuilt at each iteration
this->SetRebuildLevel(2);
// Allocate empty system containers; they are sized in InitializeSolutionStep.
mpA = TSparseSpace::CreateEmptyMatrixPointer();
mpDx = TSparseSpace::CreateEmptyVectorPointer();
mpb = TSparseSpace::CreateEmptyVectorPointer();
KRATOS_CATCH("");
}
/**
* @brief Constructor specifying the builder and solver and using Parameters
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewConvergenceCriteria The convergence criteria employed
* @param pNewBuilderAndSolver The builder and solver employed
* @param Settings Settings used in the strategy
*/
ResidualBasedNewtonRaphsonStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
Parameters Settings)
: BaseType(rModelPart),
mpScheme(pScheme),
mpBuilderAndSolver(pNewBuilderAndSolver),
mpConvergenceCriteria(pNewConvergenceCriteria),
mSolutionStepIsInitialized(false),
mInitializeWasPerformed(false),
mKeepSystemConstantDuringIterations(false)
{
KRATOS_TRY
// Validate and assign defaults. FIX: previously Settings was silently ignored
// and the member flags read below (mCalculateReactionsFlag,
// mReformDofSetAtEachStep) were used uninitialized. This mirrors the
// (ModelPart, Parameters) constructor above.
Settings = this->ValidateAndAssignParameters(Settings, this->GetDefaultParameters());
this->AssignSettings(Settings);
// Getting builder and solver (the one supplied by the caller)
auto p_builder_and_solver = GetBuilderAndSolver();
// Tells to the builder and solver if the reactions have to be Calculated or not
p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag);
// Tells to the Builder And Solver if the system matrix and vectors need to
//be reshaped at each step or not
p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
// Set EchoLevel to the default value (only time is displayed)
SetEchoLevel(1);
// By default the matrices are rebuilt at each iteration
this->SetRebuildLevel(2);
// Allocate empty system containers; they are sized in InitializeSolutionStep.
mpA = TSparseSpace::CreateEmptyMatrixPointer();
mpDx = TSparseSpace::CreateEmptyVectorPointer();
mpb = TSparseSpace::CreateEmptyVectorPointer();
KRATOS_CATCH("")
}
/**
* @brief Constructor specifying the builder and solver and using Parameters
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewLinearSolver The linear solver employed
* @param pNewConvergenceCriteria The convergence criteria employed
* @param pNewBuilderAndSolver The builder and solver employed
* @param Settings Settings used in the strategy
*/
KRATOS_DEPRECATED_MESSAGE("Constructor deprecated, please use the constructor without linear solver")
ResidualBasedNewtonRaphsonStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
Parameters Settings)
: ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, Settings)
{
KRATOS_TRY
KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "This constructor is deprecated, please use the constructor without linear solver" << std::endl;
// Getting builder and solver
auto p_builder_and_solver = GetBuilderAndSolver();
// We check if the linear solver considered for the builder and solver is consistent
auto p_linear_solver = p_builder_and_solver->GetLinearSystemSolver();
// NOTE(review): the message wording suggests a fallback, but KRATOS_ERROR_IF
// aborts on mismatch — confirm the intended severity.
KRATOS_ERROR_IF(p_linear_solver != pNewLinearSolver) << "Inconsistent linear solver in strategy and builder and solver. Considering the linear solver assigned to builder and solver :\n" << p_linear_solver->Info() << "\n instead of:\n" << pNewLinearSolver->Info() << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Destructor.
* @details In trilinos third party library, the linear solver's preconditioner should be freed before the system matrix. We control the deallocation order with Clear().
*/
~ResidualBasedNewtonRaphsonStrategy() override
{
// If the linear solver has not been deallocated, clean it before
// deallocating mpA. This prevents a memory error with the ML
// solver (which holds a reference to it).
// NOTE: The linear solver is held by the B&S
auto p_builder_and_solver = this->GetBuilderAndSolver();
if (p_builder_and_solver != nullptr) {
p_builder_and_solver->Clear();
}
// Deallocating system vectors to avoid errors in MPI. Clear calls
// TrilinosSpace::Clear for the vectors, which preserves the Map of
// current vectors, performing MPI calls in the process. Due to the
// way Python garbage collection works, this may happen after
// MPI_Finalize has already been called and is an error. Resetting
// the pointers here prevents Clear from operating with the
// (now deallocated) vectors.
mpA.reset();
mpDx.reset();
mpb.reset();
// Final cleanup of the remaining internal storage (dof set flag, scheme).
Clear();
}
/**
* @brief Set method for the time scheme
* @param pScheme The pointer to the time scheme considered
*/
// Replace the time-integration scheme used by this strategy.
void SetScheme(typename TSchemeType::Pointer pScheme) { mpScheme = pScheme; }
/**
* @brief Get method for the time scheme
* @return mpScheme: The pointer to the time scheme considered
*/
// Access the time-integration scheme currently in use.
typename TSchemeType::Pointer GetScheme() { return mpScheme; }
/**
* @brief Set method for the builder and solver
* @param pNewBuilderAndSolver The pointer to the builder and solver considered
*/
// Replace the builder and solver used by this strategy.
void SetBuilderAndSolver(typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver) { mpBuilderAndSolver = pNewBuilderAndSolver; }
/**
* @brief Get method for the builder and solver
* @return mpBuilderAndSolver: The pointer to the builder and solver considered
*/
// Access the builder and solver currently in use.
typename TBuilderAndSolverType::Pointer GetBuilderAndSolver() { return mpBuilderAndSolver; }
/**
* @brief This method sets the flag mInitializeWasPerformed
* @param InitializePerformedFlag The flag that tells if the initialize has been computed
*/
// Mark whether Initialize() has already been performed (defaults to true).
void SetInitializePerformedFlag(bool InitializePerformedFlag = true) { mInitializeWasPerformed = InitializePerformedFlag; }
/**
* @brief This method gets the flag mInitializeWasPerformed
* @return mInitializeWasPerformed: The flag that tells if the initialize has been computed
*/
// Report whether Initialize() has already been performed.
bool GetInitializePerformedFlag() { return mInitializeWasPerformed; }
/**
* @brief This method sets the flag mCalculateReactionsFlag
* @param CalculateReactionsFlag The flag that tells if the reactions are computed
*/
// Enable/disable the computation of reactions.
void SetCalculateReactionsFlag(bool CalculateReactionsFlag) { mCalculateReactionsFlag = CalculateReactionsFlag; }
/**
* @brief This method returns the flag mCalculateReactionsFlag
* @return The flag that tells if the reactions are computed
*/
// Report whether reactions are computed.
bool GetCalculateReactionsFlag() { return mCalculateReactionsFlag; }
/**
* @brief This method sets the flag mFullUpdateFlag
* @param UseOldStiffnessInFirstIterationFlag The flag that tells if
*/
// Choose whether the previous stiffness is reused in the first iteration.
void SetUseOldStiffnessInFirstIterationFlag(bool UseOldStiffnessInFirstIterationFlag) { mUseOldStiffnessInFirstIteration = UseOldStiffnessInFirstIterationFlag; }
/**
* @brief This method returns the flag mFullUpdateFlag
* @return The flag that tells if
*/
// Report whether the previous stiffness is reused in the first iteration.
bool GetUseOldStiffnessInFirstIterationFlag() { return mUseOldStiffnessInFirstIteration; }
/**
* @brief This method sets the flag mReformDofSetAtEachStep
* @param Flag The flag that tells if each time step the system is rebuilt
*/
// Set the reform-DOF-set flag and forward it to the builder and solver so
// the system is reshaped accordingly.
void SetReformDofSetAtEachStepFlag(bool Flag)
{
    mReformDofSetAtEachStep = Flag;
    GetBuilderAndSolver()->SetReshapeMatrixFlag(Flag);
}
/**
* @brief This method returns the flag mReformDofSetAtEachStep
* @return The flag that tells if each time step the system is rebuilt
*/
// Report whether the DOF set is rebuilt at every step.
bool GetReformDofSetAtEachStepFlag() { return mReformDofSetAtEachStep; }
/**
* @brief This method sets the flag mMaxIterationNumber
* @param MaxIterationNumber This is the maximum number of on linear iterations
*/
// Set the cap on non-linear iterations per solution step.
void SetMaxIterationNumber(unsigned int MaxIterationNumber) { mMaxIterationNumber = MaxIterationNumber; }
/**
* @brief This method gets the flag mMaxIterationNumber
* @return mMaxIterationNumber: This is the maximum number of on linear iterations
*/
// Report the cap on non-linear iterations per solution step.
unsigned int GetMaxIterationNumber() { return mMaxIterationNumber; }
/**
* @brief It sets the level of echo for the solving strategy
* @param Level The level to set
* @details The different levels of echo are:
* - 0: Mute... no echo at all
* - 1: Printing time and basic informations
* - 2: Printing linear solver data
* - 3: Print of debug informations: Echo of stiffness matrix, Dx, b...
*/
// Apply the verbosity level to both the strategy and its builder and solver.
void SetEchoLevel(int Level) override
{
    auto p_builder_and_solver = GetBuilderAndSolver();
    BaseType::mEchoLevel = Level;
    p_builder_and_solver->SetEchoLevel(Level);
}
//*********************************************************************************
/**OPERATIONS ACCESSIBLE FROM THE INPUT: **/
/**
* @brief Create method
* @param rModelPart The model part of the problem
* @param ThisParameters The configuration parameters
*/
// Factory: build a new instance of this strategy from a model part and
// configuration parameters.
typename BaseType::Pointer Create(ModelPart& rModelPart, Parameters ThisParameters) const override
{
    return Kratos::make_shared<ClassType>(rModelPart, ThisParameters);
}
/**
* @brief Operation to predict the solution ... if it is not called a trivial predictor is used in which the
values of the solution step of interest are assumed equal to the old values
*/
void Predict() override
{
KRATOS_TRY
const DataCommunicator &r_comm = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator();
//OPERATIONS THAT SHOULD BE DONE ONCE - internal check to avoid repetitions
//if the operations needed were already performed this does nothing
if (mInitializeWasPerformed == false)
Initialize();
//initialize solution step
if (mSolutionStepIsInitialized == false)
InitializeSolutionStep();
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
// Delegate the actual prediction of the solution to the scheme.
DofsArrayType& r_dof_set = GetBuilderAndSolver()->GetDofSet();
GetScheme()->Predict(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb);
// Applying constraints if needed
auto& r_constraints_array = BaseType::GetModelPart().MasterSlaveConstraints();
const int local_number_of_constraints = r_constraints_array.size();
// Global sum so every rank takes the same branch in distributed runs.
const int global_number_of_constraints = r_comm.SumAll(local_number_of_constraints);
if(global_number_of_constraints != 0) {
const auto& r_process_info = BaseType::GetModelPart().GetProcessInfo();
const auto it_const_begin = r_constraints_array.begin();
// Reset all slave DOFs first, then apply the constraints.
#pragma omp parallel for
for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i)
(it_const_begin + i)->ResetSlaveDofs(r_process_info);
#pragma omp parallel for
for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i)
(it_const_begin + i)->Apply(r_process_info);
// The following is needed since we need to eventually compute time derivatives after applying
// Master slave relations
TSparseSpace::SetToZero(rDx);
this->GetScheme()->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb);
}
// Move the mesh if needed
if (this->MoveMeshFlag() == true)
BaseType::MoveMesh();
KRATOS_CATCH("")
}
/**
* @brief Initialization of member variables and prior operations
*/
void Initialize() override
{
KRATOS_TRY;
if (mInitializeWasPerformed == false)
{
//pointers needed in the solution
typename TSchemeType::Pointer p_scheme = GetScheme();
typename TConvergenceCriteriaType::Pointer p_convergence_criteria = mpConvergenceCriteria;
//Initialize The Scheme - OPERATIONS TO BE DONE ONCE
if (p_scheme->SchemeIsInitialized() == false)
p_scheme->Initialize(BaseType::GetModelPart());
//Initialize The Elements - OPERATIONS TO BE DONE ONCE
if (p_scheme->ElementsAreInitialized() == false)
p_scheme->InitializeElements(BaseType::GetModelPart());
//Initialize The Conditions - OPERATIONS TO BE DONE ONCE
if (p_scheme->ConditionsAreInitialized() == false)
p_scheme->InitializeConditions(BaseType::GetModelPart());
//initialisation of the convergence criteria
if (p_convergence_criteria->IsInitialized() == false)
p_convergence_criteria->Initialize(BaseType::GetModelPart());
mInitializeWasPerformed = true;
}
KRATOS_CATCH("");
}
/**
* @brief Clears the internal storage
*/
void Clear() override
{
KRATOS_TRY;
// Setting to zero the internal flag to ensure that the dof sets are recalculated. Also clear the linear solver stored in the B&S
// (the B&S must be cleared BEFORE the system matrix — see the destructor note).
auto p_builder_and_solver = GetBuilderAndSolver();
if (p_builder_and_solver != nullptr) {
p_builder_and_solver->SetDofSetIsInitializedFlag(false);
p_builder_and_solver->Clear();
}
// Clearing the system of equations
if (mpA != nullptr)
SparseSpaceType::Clear(mpA);
if (mpDx != nullptr)
SparseSpaceType::Clear(mpDx);
if (mpb != nullptr)
SparseSpaceType::Clear(mpb);
// Clearing scheme
auto p_scheme = GetScheme();
if (p_scheme != nullptr) {
GetScheme()->Clear();
}
// Force Initialize()/InitializeSolutionStep() to run again on the next solve.
mInitializeWasPerformed = false;
mSolutionStepIsInitialized = false;
KRATOS_CATCH("");
}
/**
* @brief This should be considered as a "post solution" convergence check which is useful for coupled analysis - the convergence criteria used is the one used inside the "solve" step
*/
bool IsConverged() override
{
KRATOS_TRY;
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
if (mpConvergenceCriteria->GetActualizeRHSflag() == true)
{
TSparseSpace::SetToZero(rb);
GetBuilderAndSolver()->BuildRHS(GetScheme(), BaseType::GetModelPart(), rb);
}
return mpConvergenceCriteria->PostCriteria(BaseType::GetModelPart(), GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);
KRATOS_CATCH("");
}
/**
* @brief This operations should be called before printing the results when non trivial results
* (e.g. stresses)
* Need to be calculated given the solution of the step
* @details This operations should be called only when needed, before printing as it can involve a non
* negligible cost
*/
// Compute non-trivial output quantities (e.g. stresses) from the current
// solution. Call only before printing results: the cost is not negligible.
void CalculateOutputData() override
{
    // The scheme knows how to post-process the solution for output.
    GetScheme()->CalculateOutputData(BaseType::GetModelPart(),
                                     GetBuilderAndSolver()->GetDofSet(),
                                     *mpA, *mpDx, *mpb);
}
/**
* @brief Performs all the required operations that should be done (for each step) before solving the solution step.
* @details A member variable should be used as a flag to make sure this function is called only once per step.
*/
void InitializeSolutionStep() override
{
KRATOS_TRY;
// Guard: the whole initialization runs at most once per step (flag reset in FinalizeSolutionStep)
if (!mSolutionStepIsInitialized) {
// Pointers needed in the solution
typename TSchemeType::Pointer p_scheme = GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
ModelPart& r_model_part = BaseType::GetModelPart();
//set up the system, operation performed just once unless it is required
//to reform the dof set at each iteration
BuiltinTimer system_construction_time;
if (p_builder_and_solver->GetDofSetIsInitializedFlag() == false ||
mReformDofSetAtEachStep == true)
{
//setting up the list of the DOFs to be solved
BuiltinTimer setup_dofs_time;
p_builder_and_solver->SetUpDofSet(p_scheme, r_model_part);
KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "Setup Dofs Time: "
<< setup_dofs_time.ElapsedSeconds() << std::endl;
//shaping correctly the system
BuiltinTimer setup_system_time;
p_builder_and_solver->SetUpSystem(r_model_part);
KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "Setup System Time: "
<< setup_system_time.ElapsedSeconds() << std::endl;
//setting up the Vectors involved to the correct size
BuiltinTimer system_matrix_resize_time;
p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, mpA, mpDx, mpb,
r_model_part);
KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "System Matrix Resize Time: "
<< system_matrix_resize_time.ElapsedSeconds() << std::endl;
}
KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "System Construction Time: "
<< system_construction_time.ElapsedSeconds() << std::endl;
// The pointers mpA/mpDx/mpb are guaranteed valid here (resized above or on a previous step)
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
// Initial operations ... things that are constant over the Solution Step
p_builder_and_solver->InitializeSolutionStep(r_model_part, rA, rDx, rb);
// Initial operations ... things that are constant over the Solution Step
p_scheme->InitializeSolutionStep(r_model_part, rA, rDx, rb);
// Initialisation of the convergence criteria: build the RHS first when the
// criterion needs an actual residual to initialize against
if (mpConvergenceCriteria->GetActualizeRHSflag() == true)
{
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
}
mpConvergenceCriteria->InitializeSolutionStep(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
// Leave rb zeroed again so SolveSolutionStep starts from a clean RHS
if (mpConvergenceCriteria->GetActualizeRHSflag() == true)
TSparseSpace::SetToZero(rb);
mSolutionStepIsInitialized = true;
}
KRATOS_CATCH("");
}
/**
* @brief Performs all the required operations that should be done (for each step) after solving the solution step.
* @details A member variable should be used as a flag to make sure this function is called only once per step.
*/
void FinalizeSolutionStep() override
{
KRATOS_TRY;
// Components participating in the finalization
ModelPart& r_model_part = BaseType::GetModelPart();
typename TSchemeType::Pointer p_scheme = GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
//Finalisation of the solution step,
//operations to be done after achieving convergence, for example the
//Final Residual Vector (mb) has to be saved in there
//to avoid error accumulation
p_scheme->FinalizeSolutionStep(r_model_part, rA, rDx, rb);
p_builder_and_solver->FinalizeSolutionStep(r_model_part, rA, rDx, rb);
mpConvergenceCriteria->FinalizeSolutionStep(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
//Cleaning memory after the solution
p_scheme->Clean();
//reset flags for next step
mSolutionStepIsInitialized = false;
if (mReformDofSetAtEachStep == true) //deallocate the systemvectors
{
// Clear() releases the system matrices/vectors and resets the B&S dof-set flag
this->Clear();
}
KRATOS_CATCH("");
}
/**
* @brief Solves the current step. This function returns true if a solution has been found, false otherwise.
*/
// Newton-Raphson iteration loop: performs a first build-and-solve, then iterates
// (up to mMaxIterationNumber) building/solving and updating the database until the
// convergence criteria are satisfied. Returns true if convergence was achieved.
bool SolveSolutionStep() override
{
// Pointers needed in the solution
ModelPart& r_model_part = BaseType::GetModelPart();
typename TSchemeType::Pointer p_scheme = GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
auto& r_dof_set = p_builder_and_solver->GetDofSet();
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
//initializing the parameters of the Newton-Raphson cycle
unsigned int iteration_number = 1;
r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;
bool residual_is_updated = false;
p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
bool is_converged = mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);
// Function to perform the building and the solving phase.
// First iteration: rebuild the full LHS unless the stiffness matrix is already built
// and the rebuild level allows reusing it.
if (BaseType::mRebuildLevel > 0 || BaseType::mStiffnessMatrixIsBuilt == false) {
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
if (mUseOldStiffnessInFirstIteration){
p_builder_and_solver->BuildAndSolveLinearizedOnPreviousIteration(p_scheme, r_model_part, rA, rDx, rb,BaseType::MoveMeshFlag());
} else {
p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
} else {
TSparseSpace::SetToZero(rDx); // Dx = 0.00;
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
// Debugging info
EchoInfo(iteration_number);
// Updating the results stored in the database
UpdateDatabase(rA, rDx, rb, BaseType::MoveMeshFlag());
p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
if (is_converged) {
if (mpConvergenceCriteria->GetActualizeRHSflag()) {
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
}
is_converged = mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
}
//Iteration Cycle... performed only for NonLinearProblems
while (is_converged == false &&
iteration_number++ < mMaxIterationNumber)
{
//setting the number of iteration
r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;
p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
is_converged = mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);
//call the linear system solver to find the correction mDx for the
//it is not called if there is no system to solve
if (SparseSpaceType::Size(rDx) != 0)
{
// Rebuild level > 1 means the LHS may be rebuilt on every iteration
if (BaseType::mRebuildLevel > 1 || BaseType::mStiffnessMatrixIsBuilt == false)
{
if (GetKeepSystemConstantDuringIterations() == false)
{
//A = 0.00;
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
else
{
// Keep the LHS constant: only the RHS is rebuilt before solving
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
}
else
{
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
}
else
{
KRATOS_WARNING("NO DOFS") << "ATTENTION: no free DOFs!! " << std::endl;
}
// Debugging info
EchoInfo(iteration_number);
// Updating the results stored in the database
UpdateDatabase(rA, rDx, rb, BaseType::MoveMeshFlag());
p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
residual_is_updated = false;
if (is_converged == true)
{
if (mpConvergenceCriteria->GetActualizeRHSflag() == true)
{
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
residual_is_updated = true;
}
is_converged = mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
}
}
//plots a warning if the maximum number of iterations is exceeded
if (iteration_number >= mMaxIterationNumber) {
MaxIterationsExceeded();
} else {
KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", this->GetEchoLevel() > 0)
<< "Convergence achieved after " << iteration_number << " / "
<< mMaxIterationNumber << " iterations" << std::endl;
}
//recalculate residual if needed
//(note that some convergence criteria need it to be recalculated)
if (residual_is_updated == false)
{
// NOTE:
// The following part will be commented because it is time consuming
// and there is no obvious reason to be here. If someone need this
// part please notify the community via mailing list before uncommenting it.
// Pooyan.
// TSparseSpace::SetToZero(mb);
// p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb);
}
//calculate reactions if required
if (mCalculateReactionsFlag == true)
p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);
return is_converged;
}
/**
* @brief Function to perform expensive checks.
* @details It is designed to be called ONCE to verify that the input is correct.
*/
int Check() override
{
KRATOS_TRY
BaseType::Check();
GetBuilderAndSolver()->Check(BaseType::GetModelPart());
GetScheme()->Check(BaseType::GetModelPart());
mpConvergenceCriteria->Check(BaseType::GetModelPart());
return 0;
KRATOS_CATCH("")
}
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
// Defaults specific to this strategy; the raw JSON string below must stay in sync
// with the keys consumed in AssignSettings
Parameters default_parameters = Parameters(R"(
{
"name" : "newton_raphson_strategy",
"use_old_stiffness_in_first_iteration": false,
"max_iteration" : 10,
"reform_dofs_at_each_step" : false,
"compute_reactions" : false,
"builder_and_solver_settings" : {},
"convergence_criteria_settings" : {},
"linear_solver_settings" : {},
"scheme_settings" : {}
})");
// Getting base class default parameters
const Parameters base_default_parameters = BaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
    // Snake-case identifier used when selecting this strategy from settings
    return "newton_raphson_strategy";
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
/**
* @brief This method returns the LHS matrix
* @return The LHS matrix
*/
TSystemMatrixType &GetSystemMatrix() override
{
    // Expose the stored LHS matrix (mpA must already be allocated)
    return *mpA;
}
/**
* @brief This method returns the RHS vector
* @return The RHS vector
*/
TSystemVectorType& GetSystemVector() override
{
    // Expose the stored RHS vector (mpb must already be allocated)
    return *mpb;
}
/**
* @brief This method returns the solution vector
* @return The Dx vector
*/
TSystemVectorType& GetSolutionVector() override
{
    // Expose the stored solution-increment vector (mpDx must already be allocated)
    return *mpDx;
}
/**
* @brief Set method for the flag mKeepSystemConstantDuringIterations
* @param Value If we consider constant the system of equations during the iterations
*/
void SetKeepSystemConstantDuringIterations(bool Value)
{
    // Toggle reuse of the LHS matrix across non-linear iterations
    mKeepSystemConstantDuringIterations = Value;
}
/**
* @brief Get method for the flag mKeepSystemConstantDuringIterations
* @return True if we consider constant the system of equations during the iterations, false otherwise
*/
bool GetKeepSystemConstantDuringIterations()
{
return mKeepSystemConstantDuringIterations;
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
/// Turn back information as a string: the class identifier.
std::string Info() const override
{
    return "ResidualBasedNewtonRaphsonStrategy";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
private:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
protected:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
typename TSchemeType::Pointer mpScheme = nullptr; /// The pointer to the time scheme employed
typename TBuilderAndSolverType::Pointer mpBuilderAndSolver = nullptr; /// The pointer to the builder and solver employed
typename TConvergenceCriteriaType::Pointer mpConvergenceCriteria = nullptr; /// The pointer to the convergence criteria employed
TSystemVectorPointerType mpDx; /// The increment in the solution
TSystemVectorPointerType mpb; /// The RHS vector of the system of equations
TSystemMatrixPointerType mpA; /// The LHS matrix of the system of equations
/**
* @brief Flag telling if it is needed to reform the DofSet at each
solution step or if it is possible to form it just once
* @details Default = false
- true : Reforme at each time step
- false : Form just one (more efficient)
*/
bool mReformDofSetAtEachStep;
/**
* @brief Flag telling if it is needed or not to compute the reactions
* @details default = true
*/
bool mCalculateReactionsFlag;
/**
* @brief Flag telling if a full update of the database will be performed at the first iteration
* @details default = false
*/
bool mUseOldStiffnessInFirstIteration = false;
bool mSolutionStepIsInitialized; /// Flag to set as initialized the solution step
unsigned int mMaxIterationNumber; /// The maximum number of iterations, 30 by default
bool mInitializeWasPerformed; /// Flag to set as initialized the strategy
bool mKeepSystemConstantDuringIterations; // Flag to allow keeping system matrix constant during iterations
///@}
///@name Private Operators
///@{
/**
* @brief Here the database is updated
* @param A The LHS matrix of the system of equations
* @param Dx The incremement in the solution
* @param b The RHS vector of the system of equations
* @param MoveMesh The flag that allows to move the mesh
*/
/**
 * @brief Applies the computed correction to the nodal database.
 * @param rA The LHS matrix of the system of equations
 * @param rDx The increment in the solution
 * @param rb The RHS vector of the system of equations
 * @param MoveMesh Whether the mesh nodes must be moved after the update
 */
virtual void UpdateDatabase(
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb,
    const bool MoveMesh)
{
    // The scheme writes the correction rDx into the dofs of the model part
    GetScheme()->Update(BaseType::GetModelPart(), GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);

    // Move the mesh when a Lagrangian description is used
    if (MoveMesh) {
        BaseType::MoveMesh();
    }
}
/**
* @brief This method returns the components of the system of equations depending of the echo level
* @param IterationNumber The non linear iteration in the solution loop
*/
// Prints or dumps the system components depending on the echo level:
//   2 -> print Dx and RHS, 3 -> also print the LHS, 4 -> write A and b to MatrixMarket files.
virtual void EchoInfo(const unsigned int IterationNumber)
{
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
if (this->GetEchoLevel() == 2) //if it is needed to print the debug info
{
KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl;
KRATOS_INFO("RHS") << "RHS = " << rb << std::endl;
}
else if (this->GetEchoLevel() == 3) //if it is needed to print the debug info
{
KRATOS_INFO("LHS") << "SystemMatrix = " << rA << std::endl;
KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl;
KRATOS_INFO("RHS") << "RHS = " << rb << std::endl;
}
else if (this->GetEchoLevel() == 4) //print to matrix market file
{
// File names encode the current TIME and the non-linear iteration number
std::stringstream matrix_market_name;
matrix_market_name << "A_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << ".mm";
TSparseSpace::WriteMatrixMarketMatrix((char *)(matrix_market_name.str()).c_str(), rA, false);
std::stringstream matrix_market_vectname;
matrix_market_vectname << "b_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << ".mm.rhs";
TSparseSpace::WriteMatrixMarketVector((char *)(matrix_market_vectname.str()).c_str(), rb);
}
}
/**
* @brief This method prints information after reach the max number of iterations
*/
virtual void MaxIterationsExceeded()
{
KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", this->GetEchoLevel() > 0)
<< "ATTENTION: max iterations ( " << mMaxIterationNumber
<< " ) exceeded!" << std::endl;
}
/**
* @brief This method assigns settings to member variables
* @param ThisParameters Parameters that are assigned to the member variables
*/
void AssignSettings(const Parameters ThisParameters) override
{
// Base class consumes its own keys first
BaseType::AssignSettings(ThisParameters);
mMaxIterationNumber = ThisParameters["max_iteration"].GetInt();
mReformDofSetAtEachStep = ThisParameters["reform_dofs_at_each_step"].GetBool();
mCalculateReactionsFlag = ThisParameters["compute_reactions"].GetBool();
mUseOldStiffnessInFirstIteration = ThisParameters["use_old_stiffness_in_first_iteration"].GetBool();
// Saving the convergence criteria to be used
// Constructing components from settings is not supported yet: fail loudly instead of silently ignoring
if (ThisParameters["convergence_criteria_settings"].Has("name")) {
KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
}
// Saving the scheme
if (ThisParameters["scheme_settings"].Has("name")) {
KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
}
// Setting up the default builder and solver
if (ThisParameters["builder_and_solver_settings"].Has("name")) {
KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
}
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/**
* Copy constructor.
*/
ResidualBasedNewtonRaphsonStrategy(const ResidualBasedNewtonRaphsonStrategy &Other){};
///@}
}; /* Class ResidualBasedNewtonRaphsonStrategy */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos. */
#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY defined */
|
search.h | // -*- C++ -*-
// Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/search.h
* @brief Parallel implementation base for std::search() and
* std::search_n().
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_SEARCH_H
#define _GLIBCXX_PARALLEL_SEARCH_H 1
#include <bits/stl_algobase.h>
#include <parallel/parallel.h>
#include <parallel/equally_split.h>
namespace __gnu_parallel
{
/**
* @brief Precalculate advances for Knuth-Morris-Pratt algorithm.
* @param elements Begin iterator of sequence to search for.
* @param length Length of sequence to search for.
* @param off Returned border offsets (KMP failure function); must hold length+1 entries.
*/
// Standard Knuth-Morris-Pratt border (failure-function) computation:
// off[j] is the length of the longest proper border of the prefix of length j,
// with off[0] = -1 as a sentinel. The caller must provide off with length+1 slots.
template<typename RandomAccessIterator, typename _DifferenceTp>
void
calc_borders(RandomAccessIterator elements, _DifferenceTp length,
_DifferenceTp* off)
{
typedef _DifferenceTp difference_type;
off[0] = -1;
if (length > 1)
off[1] = 0;
difference_type k = 0;
for (difference_type j = 2; j <= length; j++)
{
// Shrink the candidate border until it can be extended by elements[j-1]
while ((k >= 0) && !(elements[k] == elements[j-1]))
k = off[k];
off[j] = ++k;
}
}
// Generic parallel find algorithm (requires random access iterator).
/** @brief Parallel std::search.
* @param begin1 Begin iterator of first sequence.
* @param end1 End iterator of first sequence.
* @param begin2 Begin iterator of second sequence.
* @param end2 End iterator of second sequence.
* @param pred Find predicate.
* @return Position of the first occurrence in the first sequence (end1 if not found). */
template<typename _RandomAccessIterator1,
typename _RandomAccessIterator2,
typename Pred>
_RandomAccessIterator1
search_template(_RandomAccessIterator1 begin1, _RandomAccessIterator1 end1,
_RandomAccessIterator2 begin2, _RandomAccessIterator2 end2,
Pred pred)
{
typedef std::iterator_traits<_RandomAccessIterator1> traits_type;
typedef typename traits_type::difference_type difference_type;
_GLIBCXX_CALL((end1 - begin1) + (end2 - begin2));
difference_type pattern_length = end2 - begin2;
// Pattern too short.
if(pattern_length <= 0)
return end1;
// Last point to start search.
difference_type input_length = (end1 - begin1) - pattern_length;
// Where is first occurrence of pattern? defaults to end.
difference_type result = (end1 - begin1);
difference_type *splitters;
// Pattern too long.
if (input_length < 0)
return end1;
// Lock serializes updates to the shared minimum `result`.
omp_lock_t result_lock;
omp_init_lock(&result_lock);
thread_index_t num_threads =
std::max<difference_type>(1,
std::min<difference_type>(input_length, get_max_threads()));
// KMP failure function for the pattern (VLA sized by pattern_length).
difference_type advances[pattern_length];
calc_borders(begin2, pattern_length, advances);
# pragma omp parallel num_threads(num_threads)
{
// One thread partitions the input range among all threads; the implicit
// barrier at the end of `single` makes `splitters` visible to everyone.
# pragma omp single
{
num_threads = omp_get_num_threads();
splitters = new difference_type[num_threads + 1];
equally_split(input_length, num_threads, splitters);
}
thread_index_t iam = omp_get_thread_num();
difference_type start = splitters[iam], stop = splitters[iam + 1];
difference_type pos_in_pattern = 0;
bool found_pattern = false;
while (start <= stop && !found_pattern)
{
// Get new value of result.
#pragma omp flush(result)
// No chance for this thread to find first occurrence.
if (result < start)
break;
// KMP matching loop: extend the partial match as far as possible.
while (pred(begin1[start + pos_in_pattern],
begin2[pos_in_pattern]))
{
++pos_in_pattern;
if (pos_in_pattern == pattern_length)
{
// Found new candidate for result.
omp_set_lock(&result_lock);
result = std::min(result, start);
omp_unset_lock(&result_lock);
found_pattern = true;
break;
}
}
// Make safe jump.
start += (pos_in_pattern - advances[pos_in_pattern]);
pos_in_pattern =
(advances[pos_in_pattern] < 0) ? 0 : advances[pos_in_pattern];
}
} //parallel
omp_destroy_lock(&result_lock);
delete[] splitters;
// Return iterator on found element.
return (begin1 + result);
}
} // end namespace
#endif /* _GLIBCXX_PARALLEL_SEARCH_H */
|
GB_unop__identity_uint32_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_fp64)
// op(A') function: GB (_unop_tran__identity_uint32_fp64)
// C type: uint32_t
// A type: double
// cast: uint32_t cij = GB_cast_to_uint32_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator with a double -> uint32_t cast.
GrB_Info GB (_unop_apply__identity_uint32_fp64)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // all entries present: cast every value
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = GB_cast_to_uint32_t ((double) (Ax [p])) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = GB_cast_to_uint32_t ((double) (Ax [p])) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast (double -> uint32_t), and apply identity.
// The actual loop body lives in the shared template GB_unop_transpose.c, which is
// textually included here and expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint32_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__exp2_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__exp2_fc32_fc32)
// op(A') function: GB (_unop_tran__exp2_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_cexp2f (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cexp2f (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_cexp2f (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXP2 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the complex base-2 exponential, float complex -> float complex.
GrB_Info GB (_unop_apply__exp2_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // all entries present: apply exp2 to every value
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = GB_cexp2f (Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = GB_cexp2f (Ax [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose and apply the complex base-2 exponential.
// The loop body is the shared template GB_unop_transpose.c, textually included and
// expanded via the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__exp2_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
inputFlush.c | // Test input for omp flush, considering several cases:
//
// a. omp flush, without any variables specified
// b. omp flush (var_list), with one or more variables specified
// c. orphaned omp flush
// d. #pragma omp flush has preprocessing info. attached
//
// Liao, 4/25/2008
#if defined(_OPENMP)
#include <omp.h>
#endif
double x=1.0;
// Compiler-test fixture: exercises `omp flush` both with a variable list (case b)
// and without one (case a), inside a parallel region. The exact pragma forms are
// the point of the test; do not normalize them.
int main(void)
{
int a=0,b;
#pragma omp parallel
{
#if 1
/* case b: flush with an explicit variable list (also case d: preprocessing info attached) */
#pragma omp flush (a,b)
a=a+1;
/* case a: flush without any variables specified */
#pragma omp flush
b=a;
#endif
}
return 0;
}
// Case c: an orphaned `omp flush` (outside any lexically enclosing parallel region),
// flushing the file-scope global x.
void foo()
{
#if 1
#pragma omp flush(x)
x = 2.0;
#endif
}
|
Layers.h | //
// smarties
// Copyright (c) 2018 CSE-Lab, ETH Zurich, Switzerland. All rights reserved.
// Distributed under the terms of the MIT license.
//
// Created by Guido Novati (novatig@ethz.ch).
//
#ifndef smarties_Layers_h
#define smarties_Layers_h
#include "Parameters.h"
#include "Activation.h"
#include "Functions.h"
#ifndef __STDC_VERSION__ //it should never be defined with g++
#define __STDC_VERSION__ 0
#endif
#if defined(USE_MKL)
#include "mkl_cblas.h"
#elif defined(USE_OPENBLAS)
#include "cblas.h"
#else
#define USE_OMPSIMD_BLAS
#endif
//#include <immintrin.h>
namespace smarties
{
#ifdef USE_OMPSIMD_BLAS
// Hand-rolled matrix-vector product fallback used when no BLAS is available:
// accumulates _Y[o] += sum_i _W[S*o + i] * _X[i] for o in [0, NY), i in [0, NX).
// S is the (possibly padded) row stride of _W. The #else branch tiles the inner
// dimension in chunks of one cache line to improve locality; the #if 0 branch is
// the straightforward row-at-a-time variant kept for reference.
template<typename T>
inline static void GEMVomp(const Uint NX, const Uint NY, const Uint S,
const T * __restrict__ const _W,
const T * __restrict__ const _X,
T * __restrict__ const _Y)
{
assert(_W not_eq nullptr && _X not_eq nullptr && _Y not_eq nullptr);
#if 0
for (Uint o=0; o<NY; ++o) {
const T* __restrict__ const W = _W + S * o;
T Y = 0;
#pragma omp simd aligned(_X, W : VEC_WIDTH) reduction(+:Y)
for (Uint i=0; i<NX; ++i) Y += W[i] * _X[i];
_Y[o] += Y;
}
#else
// number of elements of T that fit in one 64-byte cache line
static constexpr Uint cacheLineLen = 64 / sizeof(T);
for (Uint I=0; I<NX; I+=cacheLineLen)
for (Uint o=0; o<NY; ++o) {
const T* __restrict__ const W = _W + S * o;
T Y = 0;
// clamp the tile end so the last partial cache line is handled correctly
const Uint Ninner = std::min(NX, I+cacheLineLen);
#pragma omp simd aligned(_X, W : VEC_WIDTH) reduction(+:Y)
for (Uint i=I; i<Ninner; ++i) Y += W[i] * _X[i];
_Y[o] += Y;
}
#endif
}
#endif
// Base class of all layer types. To insert a new layer type, overwrite all
// virtual functions.
class Layer
{
public:
// size: number of outputs of this layer. ID: index in the network's layer
// list. link: offset to the layer feeding this one (inputs are read from
// layer ID-link). bInput: whether the layer reads external input.
const Uint size, ID, link, bInput;
// Whether this layer's activations are part of the network's output.
Uint bOutput;
// Sub-range [startCompInpGrads, startCompInpGrads+spanCompInpGrads) of the
// preceding layer's outputs for which input gradients must be computed.
Uint spanCompInpGrads = 0, startCompInpGrads = 0;
inline Uint number() const { return ID; }
inline Uint nOutputs() const { return size; }
// Should return the number of weights and biases required by layer
virtual void requiredParameters(std::vector<Uint>& nWeight,
std::vector<Uint>& nBiases ) const = 0;
// Should return work memory that allows the network to compute forward step
// and then, without re-calling forward, compute backward step.
// See the LSTM class for an example on working out of the box.
virtual void requiredActivation(std::vector<Uint>& sizes,
std::vector<Uint>& bOutputs,
std::vector<Uint>& bInputs) const = 0;
// Some classes might allow user to specify an initial value for the bias
// vector (eg. parametric layer or linear output layer)
virtual void biasInitialValues(const std::vector<Real> init) = 0;
Layer(
Uint _ID,
Uint _size,
bool bOut,
bool bInp = false,
Uint _link = 0):
size(_size), ID(_ID), link(_link), bInput(bInp), bOutput(bOut) {}
// One-line human-readable description of the layer (for logging).
virtual std::string printSpecs() const = 0;
virtual ~Layer() {}
virtual void forward( const Activation*const prev,
const Activation*const curr,
const Parameters*const para) const = 0;
// forward step without recurrent connection:
inline void forward( const Activation*const curr,
const Parameters*const para) const {
return forward(nullptr, curr, para);
}
virtual void backward( const Activation*const prev,
const Activation*const curr,
const Activation*const next,
const Parameters*const grad,
const Parameters*const para) const = 0;
// backward step without recurrent connection:
inline void backward( const Activation*const curr,
const Parameters*const grad,
const Parameters*const para) const {
return backward(nullptr, curr, nullptr, grad, para);
}
// Shared backward helper for dense (fully-connected style) layers.
// NI: number of feed-forward inputs, NO: number of outputs, NOsimd:
// SIMD-padded row stride of the weight matrix, NR: number of recurrent
// inputs (0 disables the recurrent terms). Accumulates (never overwrites):
//  1) gradient w.r.t. this layer's inputs into curr->E(ID-link),
//  2) gradient w.r.t. previous-step recurrent inputs into prev->E(ID),
//  3) bias and weight gradients into grad (skipped if grad == nullptr).
void backward(const Uint NI, const Uint NO, const Uint NOsimd, const Uint NR,
const Activation*const prev,
const Activation*const curr,
const Activation*const next,
const Parameters*const grad,
const Parameters*const para) const
{
const nnReal* const deltas = curr->E(ID);
if(NO == 0) return;
// 1) propagate gradients to the restricted span of this layer's inputs
if( spanCompInpGrads )
{
nnReal* const errors = curr->E(ID-link);
const nnReal* const weight = para->W(ID);
#ifdef USE_OMPSIMD_BLAS
GEMVomp(NO, spanCompInpGrads, NOsimd,
weight + startCompInpGrads * NOsimd,
deltas, errors + startCompInpGrads);
#else
SMARTIES_gemv(CblasRowMajor, CblasNoTrans, spanCompInpGrads, NO, 1,
weight + startCompInpGrads * NOsimd, NOsimd,
deltas, 1, 1, errors + startCompInpGrads, 1);
#endif
}
// 2) propagate gradients through the recurrent connection; recurrent
// weights are stored after the NI feed-forward rows (offset NOsimd*NI)
if(NR && prev not_eq nullptr)
{
nnReal* const errors = prev->E(ID);
const nnReal* const weight = para->W(ID) +NOsimd*NI;
#ifdef USE_OMPSIMD_BLAS
GEMVomp(NO, NR, NOsimd, weight, deltas, errors);
#else
SMARTIES_gemv(CblasRowMajor, CblasNoTrans, NR, NO, 1,
weight, NOsimd, deltas, 1, 1, errors, 1);
#endif
}
// parameter gradients not requested: done
if(grad == nullptr) return;
// 3) bias gradient: dL/db_o += delta_o
{
nnReal* const grad_b = grad->B(ID);
#pragma omp simd aligned(deltas, grad_b : VEC_WIDTH)
for(Uint o=0; o<NO; ++o) grad_b[o] += deltas[o];
}
// 4) feed-forward weight gradient: dL/dW_io += input_i * delta_o
{
const nnReal* const inputs = curr->Y(ID-link);
nnReal* const grad_w = grad->W(ID);
for(Uint i=0; i<NI; ++i) {
nnReal* const G = grad_w + NOsimd*i;
#pragma omp simd aligned(deltas,inputs,G : VEC_WIDTH)
for(Uint o=0; o<NO; ++o) G[o] += inputs[i] * deltas[o];
}
}
// 5) recurrent weight gradient, using previous-step activations
if(NR && prev not_eq nullptr)
{
const nnReal* const inputs = prev->Y(ID);
nnReal* const grad_w = grad->W(ID) +NOsimd*NI;
for(Uint i=0; i<NR; ++i) {
nnReal* const G = grad_w + NOsimd*i;
#pragma omp simd aligned(deltas, inputs, G : VEC_WIDTH)
for(Uint o=0; o<NO; ++o) G[o] += inputs[i] * deltas[o];
}
}
}
// Initialize the weights and biases. Probably by sampling.
virtual void initialize(std::mt19937& G, const Parameters*const W,
Real initializationFac) const = 0;
// Serialize this layer's parameters into tmp (as float);
// returns the number of values written.
virtual size_t save(const Parameters * const para,
float * tmp) const = 0;
// Restore this layer's parameters from tmp (as float);
// returns the number of values read.
virtual size_t restart(const Parameters * const para,
const float * tmp) const = 0;
};
class InputLayer: public Layer
{
public:
InputLayer(Uint _size, Uint _ID) : Layer(_ID, _size, false, true) { }
std::string printSpecs() const override {
return "(" + std::to_string(ID) + ") Input Layer of size:"
+ std::to_string(size) + "\n";
}
void requiredParameters(std::vector<Uint>& nWeight,
std::vector<Uint>& nBiases ) const override {
assert(nWeight.size() == 0 && nBiases.size() == 0);
nWeight.push_back(0);
nBiases.push_back(0);
}
void requiredActivation(std::vector<Uint>& sizes,
std::vector<Uint>& bOutputs,
std::vector<Uint>& bInputs) const override {
assert(sizes.size() == 0 && bOutputs.size() == 0);
sizes.push_back(size);
bOutputs.push_back(false);
bInputs.push_back(bInput);
}
void biasInitialValues(const std::vector<Real> init) override { }
void forward( const Activation*const prev,
const Activation*const curr,
const Parameters*const para) const override
{
#ifdef SMARTIES_INPUT_SANITIZE
// In case input has a very wide kurtosis, network grads might explode.
// (Remember that smarties gradually learns mean and stdev, so each input
// variable to the net can be thought to have mean 0 and stdev 1)
// Almost all inputs will be from -6 and 6 stdevs and will be untouched.
// From from 6 to 111 stdevs away, we smoothly transition to sqrt(x).
// Beyond 111 stdevs away we log the input to avoid exploding gradients.
nnReal* const ret = curr->Y(ID);
for (Uint j=0; j<size; ++j) {
const nnReal sign = ret[j]>0 ? 1 : -1, absX = std::fabs(ret[j]);
if (absX > 111) {
ret[j] = sign * 9.02 * std::log(absX - 56.88);
} else if (absX > 6) {
ret[j] = sign * std::sqrt(12 * absX - 36);
} // else leave as is
}
#endif
}
void backward( const Activation*const prev,
const Activation*const curr,
const Activation*const next,
const Parameters*const grad,
const Parameters*const para) const override { }
void initialize(std::mt19937& G, const Parameters*const W,
Real initializationFac) const override { }
size_t save(const Parameters * const para,
float * tmp) const override { return 0; }
size_t restart(const Parameters * const para,
const float * tmp) const override { return 0; }
};
class JoinLayer: public Layer
{
  // Number of immediately preceding layers whose outputs are concatenated.
  const Uint nJoin;
 public:
  JoinLayer(Uint _ID, Uint _N, Uint _nJ): Layer(_ID,_N,false), nJoin(_nJ) {
    assert(nJoin>1);
  }

  std::string printSpecs() const override {
    return "(" + std::to_string(ID) + ") Join Layer of size:"
           + std::to_string(size) + " joining the previous "
           + std::to_string(nJoin) + " layers\n";
  }

  // A join layer owns no trainable parameters.
  void requiredParameters(std::vector<Uint>& nWeight,
                          std::vector<Uint>& nBiases ) const override {
    assert(nWeight.size() == 0 && nBiases.size() == 0);
    nWeight.emplace_back(0);
    nBiases.emplace_back(0);
  }

  void requiredActivation(std::vector<Uint>& sizes,
                          std::vector<Uint>& bOutputs,
                          std::vector<Uint>& bInputs) const override {
    assert(sizes.size() == 0 && bOutputs.size() == 0);
    sizes.emplace_back(size);
    bOutputs.emplace_back(bOutput);
    bInputs.emplace_back(bInput);
  }

  void biasInitialValues(const std::vector<Real> init) override { }

  // Forward: concatenate the outputs of the nJoin preceding layers into
  // this layer's activation buffer, in order ID-1, ID-2, ..., ID-nJoin.
  void forward( const Activation*const prev,
                const Activation*const curr,
                const Parameters*const para) const override {
    nnReal* const dst = curr->Y(ID);
    Uint offset = 0;
    for (Uint d=1; d<=nJoin; ++d) {
      const nnReal* const src = curr->Y(ID-d);
      const Uint n = curr->sizes[ID-d];
      for (Uint j=0; j<n; ++j) dst[offset + j] = src[j];
      offset += n;
    }
    assert(offset==size);
  }

  // Backward: split this layer's error vector back onto the predecessors.
  // Note: overwrites (does not accumulate into) their error buffers.
  void backward( const Activation*const prev,
                 const Activation*const curr,
                 const Activation*const next,
                 const Parameters*const grad,
                 const Parameters*const para) const override
  {
    const nnReal* const err = curr->E(ID);
    Uint offset = 0;
    for (Uint d=1; d<=nJoin; ++d)
    {
      nnReal* const dst = curr->E(ID-d);
      const Uint n = curr->sizes[ID-d];
      for (Uint j=0; j<n; ++j) dst[j] = err[offset + j];
      offset += n;
    }
    assert(offset==size);
  }

  void initialize(std::mt19937& G, const Parameters*const W,
                  Real initializationFac) const override { }
  size_t save(const Parameters * const para,
              float * tmp) const override { return 0; }
  size_t restart(const Parameters * const para,
                 const float * tmp) const override { return 0; }
};
// Residual connection with a learned elementwise gate:
//   y = Y(ID-1) + W (*) Y(ID-2) + B   (elementwise product and sum)
// W is initialized to 1 and B to 0, so training starts as a plain residual.
class ParametricResidualLayer: public Layer
{
public:
ParametricResidualLayer(Uint _ID, Uint _N): Layer(_ID, _N, false) { }
std::string printSpecs() const override {
return "("+ std::to_string(ID) +") Parametric Residual Connection of size:"
+ std::to_string(size) + "\n";
}
// One elementwise weight and one bias per output.
void requiredParameters(std::vector<Uint>& nWeight,
std::vector<Uint>& nBiases ) const override {
nWeight.push_back(size);
nBiases.push_back(size);
}
void requiredActivation(std::vector<Uint>& sizes,
std::vector<Uint>& bOutputs,
std::vector<Uint>& bInputs) const override {
sizes.push_back(size);
bOutputs.push_back(bOutput);
bInputs.push_back(false);
}
void biasInitialValues(const std::vector<Real> init) override { }
// Forward: copy the immediately preceding layer's output, then add the
// gated skip connection from layer ID-2 (truncated to the smaller size).
void forward( const Activation*const prev,
const Activation*const curr,
const Parameters*const para) const override
{
nnReal* const ret = curr->Y(ID);
assert(curr->sizes[ID-1] >= size);
memcpy(ret, curr->Y(ID-1), size * sizeof(nnReal));
const nnReal* const W = para->W(ID);
const nnReal* const B = para->B(ID);
const nnReal* const inp = curr->Y(ID-2);
const Uint sizeInp = std::min(curr->sizes[ID-2], size);
#pragma omp simd aligned(ret, inp, W, B : VEC_WIDTH)
for (Uint j=0; j<sizeInp; ++j) ret[j] += inp[j] * W[j] + B[j];
}
// Backward: pass the error unchanged to layer ID-1 (overwrites E(ID-1)),
// scale it by W for layer ID-2 (accumulates into E(ID-2)), and, when
// grad is provided, accumulate the W and B gradients.
void backward( const Activation*const prev,
const Activation*const curr,
const Activation*const next,
const Parameters*const grad,
const Parameters*const para) const override
{
const nnReal* const delta = curr->E(ID);
assert(curr->sizes[ID-1] >= size);
memcpy(curr->E(ID-1), delta, size * sizeof(nnReal) );
nnReal* const gradInp = curr->E(ID-2);
const nnReal* const W = para->W(ID);
const nnReal* const inp = curr->Y(ID-2);
const Uint sizeInp = std::min(curr->sizes[ID-2], size);
if(grad == nullptr) {
// parameter gradients not requested: only propagate input gradients
#pragma omp simd aligned(delta, W, gradInp : VEC_WIDTH)
for (Uint j=0; j<sizeInp; ++j) gradInp[j] += delta[j] * W[j];
return;
}
nnReal* const gradB = grad->B(ID);
nnReal* const gradW = grad->W(ID);
#pragma omp simd aligned(delta,inp,W, gradB,gradW,gradInp : VEC_WIDTH)
for (Uint j=0; j<sizeInp; ++j) {
gradInp[j] += delta[j] * W[j];
gradW[j] += delta[j] * inp[j];
gradB[j] += delta[j];
}
}
// Deterministic init: identity gate (W=1, B=0); G and initializationFac
// are unused here.
void initialize(std::mt19937& G, const Parameters*const W,
Real initializationFac) const override
{
for(Uint o=0; o<size; ++o) W->B(ID)[o] = 0.0;
for(Uint o=0; o<size; ++o) W->W(ID)[o] = 1.0;
}
// Serialization order: all weights first, then all biases (restart below
// must read them back in the same order).
size_t save(const Parameters * const para,
float * tmp) const override
{
const nnReal* const bias = para->B(ID);
const nnReal* const weight = para->W(ID);
for(Uint o=0; o<size; ++o) *(tmp++) = (float) weight[o];
for(Uint o=0; o<size; ++o) *(tmp++) = (float) bias[o];
return 2*size;
}
size_t restart(const Parameters * const para,
const float * tmp) const override
{
nnReal* const bias = para->B(ID);
nnReal* const weight = para->W(ID);
for (Uint n=0; n<size; ++n) weight[n] = (nnReal) *(tmp++);
for (Uint n=0; n<size; ++n) bias[n] = (nnReal) *(tmp++);
return 2*size;
}
};
class ResidualLayer: public Layer
{
 public:
  ResidualLayer(Uint _ID, Uint _N): Layer(_ID,_N,false) { }

  std::string printSpecs() const override {
    return "(" + std::to_string(ID) + ") Residual Connection of size:"
           + std::to_string(size) + "\n";
  }

  // A plain residual connection owns no trainable parameters.
  void requiredParameters(std::vector<Uint>& nWeight,
                          std::vector<Uint>& nBiases ) const override {
    nWeight.emplace_back(0);
    nBiases.emplace_back(0);
  }

  void requiredActivation(std::vector<Uint>& sizes,
                          std::vector<Uint>& bOutputs,
                          std::vector<Uint>& bInputs) const override {
    sizes.emplace_back(size);
    bOutputs.emplace_back(bOutput);
    bInputs.emplace_back(false);
  }

  void biasInitialValues(const std::vector<Real> init) override { }

  // Forward: y = Y(ID-1) + Y(ID-2), each term truncated to min(its size, size).
  void forward( const Activation*const prev,
                const Activation*const curr,
                const Parameters*const para) const override
  {
    nnReal* const sum = curr->Y(ID);
    std::memset( sum, 0, size * sizeof(nnReal) );
    for (Uint d=1; d<=2; ++d)
    {
      const nnReal* const src = curr->Y(ID-d);
      const Uint n = std::min(curr->sizes[ID-d], size);
      #pragma omp simd aligned(sum, src : VEC_WIDTH)
      for (Uint j=0; j<n; ++j) sum[j] += src[j];
    }
  }

  // Backward: copy (overwrite) this layer's error onto both predecessors.
  void backward( const Activation*const prev,
                 const Activation*const curr,
                 const Activation*const next,
                 const Parameters*const grad,
                 const Parameters*const para) const override {
    const nnReal* const err = curr->E(ID);
    for (Uint d=1; d<=2; ++d) {
      const Uint n = std::min(curr->sizes[ID-d], size);
      memcpy( curr->E(ID-d), err, n * sizeof(nnReal) );
    }
  }

  void initialize(std::mt19937& G, const Parameters*const W,
                  Real initializationFac) const override { }
  size_t save(const Parameters * const para,
              float * tmp) const override { return 0; }
  size_t restart(const Parameters * const para,
                 const float * tmp) const override { return 0; }
};
class ParamLayer: public Layer
{
  // Activation applied elementwise to the learned bias vector.
  const std::unique_ptr<Function> func;
  // Requested initial values of the layer's outputs (post-activation).
  std::vector<nnReal> initVals;
 public:
  ParamLayer(Uint _ID, Uint _size, std::string funcType, std::vector<Real>init)
    : Layer(_ID, _size, true), func(makeFunction(funcType)) {
    biasInitialValues(init);
  }

  std::string printSpecs() const override {
    std::string ret = "(" + std::to_string(ID) + ") Parameter Layer of size:"
                      + std::to_string(size) + ". Initialized:";
    for (const nnReal v : initVals) { ret += " " + std::to_string(v); }
    return ret + "\n";
  }

  // One trainable bias per output, no weights.
  void requiredParameters(std::vector<Uint>& nWeight,
                          std::vector<Uint>& nBiases ) const override {
    nWeight.push_back(0);
    nBiases.push_back(size);
  }

  void requiredActivation(std::vector<Uint>& sizes,
                          std::vector<Uint>& bOutputs,
                          std::vector<Uint>& bInputs) const override {
    sizes.push_back(size);
    bOutputs.push_back(true);
    bInputs.push_back(bInput);
  }

  // Store the user-requested initial output values; length must match size.
  void biasInitialValues(const std::vector<Real> init) override {
    if(init.size() != size) _die("size of init:%lu.", init.size());
    initVals.assign(init.begin(), init.end());
  }

  // Forward: pre-activation X stores the raw bias, output Y = func(bias).
  // The layer ignores any incoming activations.
  void forward( const Activation*const prev,
                const Activation*const curr,
                const Parameters*const para) const override
  {
    nnReal* const pre = curr->X(ID);
    nnReal* const out = curr->Y(ID);
    const nnReal* const bias = para->B(ID);
    for (Uint n=0; n<size; ++n) {
      pre[n] = bias[n];
      out[n] = func->eval(bias[n]);
    }
  }

  // Backward: scale deltas by the activation derivative, then (if requested)
  // accumulate them into the bias gradient.
  void backward( const Activation*const prev,
                 const Activation*const curr,
                 const Activation*const next,
                 const Parameters*const grad,
                 const Parameters*const para) const override
  {
    const nnReal* const pre = curr->X(ID);
    const nnReal* const out = curr->Y(ID);
    nnReal* const deltas = curr->E(ID);
    for(Uint o=0; o<size; ++o)
      deltas[o] *= func->evalDiff(pre[o], out[o]);
    if(grad not_eq nullptr)
    {
      nnReal* const grad_b = grad->B(ID);
      for(Uint o=0; o<size; ++o) grad_b[o] += deltas[o];
    }
  }

  // Deterministic init: biases are set so that func(bias) == initVals.
  void initialize(std::mt19937& G, const Parameters*const W,
                  Real initializationFac) const override
  {
    nnReal* const b = W->B(ID);
    for(Uint o=0; o<size; ++o) b[o] = func->inverse(initVals[o]);
  }

  size_t save(const Parameters * const para,
              float * tmp) const override
  {
    const nnReal* const bias = para->B(ID);
    for (Uint n=0; n<size; ++n) tmp[n] = (float) bias[n];
    return size;
  }

  size_t restart(const Parameters * const para,
                 const float * tmp) const override
  {
    nnReal* const bias = para->B(ID);
    for (Uint n=0; n<size; ++n) bias[n] = (nnReal) tmp[n];
    return size;
  }
};
} // end namespace smarties
#endif // smarties_Quadratic_term_h
|
elementwise_add_arm_func.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef ELEMENTWISEADD_OP
#pragma once
#include "operators/math/elementwise_op_function.h"
#include "operators/op_param.h"
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#endif
namespace paddle_mobile {
namespace operators {
template <typename T>
// Computes Out = X + Y with broadcasting: Y (of 'channels' elements along
// 'axis') is added to each contiguous slice of X of length elementwise_num,
// repeated over 'batch' outer iterations. NEON path vectorizes in chunks of
// 16/8/4 floats with a scalar-free lane-store tail.
inline void ElementwiseAddCompute(const ElementwiseAddParam<CPU> &param) {
  const framework::Tensor *input_x = param.InputX();
  const framework::Tensor *input_y = param.InputY();
  framework::Tensor *Out = param.Out();
  int axis = param.Axis();
  const auto &x_dims = input_x->dims();
  const auto &y_dims = input_y->dims();
  /// axis = -1 represent the last dimensions.
  axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
  size_t batch = 1;           // product of x dims before 'axis'
  size_t channels = 1;        // product of all y dims (the broadcast vector)
  size_t elementwise_num = 1; // product of x dims after the y-aligned span
  for (int i = 0; i < axis; ++i) {
    batch *= x_dims[i];
  }
  for (int i = 0; i < y_dims.size(); ++i) {
    channels *= y_dims[i];
  }
  for (int i = y_dims.size() + axis; i < x_dims.size(); ++i) {
    elementwise_num *= x_dims[i];
  }
  const float *bias_data = input_y->data<float>();
  const float *input_data = input_x->data<float>();
  float *output_data = Out->mutable_data<float>();
#pragma omp parallel for collapse(2)
  for (int i = 0; i < batch; ++i) {
    for (int j = 0; j < channels; ++j) {
      size_t offset = (i * channels + j) * elementwise_num;
      const float *input = input_data + offset;
      const float bias = bias_data[j];
      float *output = output_data + offset;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
      int loop = elementwise_num >> 0x4;  // 16-float main blocks
      int remain = elementwise_num & 0xF;
      float32x4_t rb = vdupq_n_f32(bias);
      for (int k = 0; k < loop; ++k) {
        float32x4_t r0 = vld1q_f32(input);
        float32x4_t r1 = vld1q_f32(input + 4);
        float32x4_t r2 = vld1q_f32(input + 8);
        float32x4_t r3 = vld1q_f32(input + 12);
        r0 = vaddq_f32(r0, rb);
        r1 = vaddq_f32(r1, rb);
        r2 = vaddq_f32(r2, rb);
        r3 = vaddq_f32(r3, rb);
        vst1q_f32(output, r0);
        vst1q_f32(output + 4, r1);
        vst1q_f32(output + 8, r2);
        vst1q_f32(output + 12, r3);
        input += 16;
        output += 16;
      }
      if (remain >= 8) {
        float32x4_t r0 = vld1q_f32(input);
        float32x4_t r1 = vld1q_f32(input + 4);
        r0 = vaddq_f32(r0, rb);
        r1 = vaddq_f32(r1, rb);
        vst1q_f32(output, r0);
        vst1q_f32(output + 4, r1);
        input += 8;
        output += 8;
        remain -= 8;
      }
      if (remain >= 4) {
        float32x4_t r0 = vld1q_f32(input);
        r0 = vaddq_f32(r0, rb);
        vst1q_f32(output, r0);
        input += 4;
        output += 4;
        remain -= 4;
      }
      if (remain > 0) {
        // NOTE(review): this loads 4 floats even when remain < 4, reading
        // past the logical end of the input — assumes readable padding
        // after the tensor buffer; confirm the allocator guarantees it.
        float32x4_t r0 = vld1q_f32(input);
        r0 = vaddq_f32(r0, rb);
        switch (remain) {
          case 1:
            vst1q_lane_f32(output, r0, 0);
            break;
          case 2:
            vst1_f32(output, vget_low_f32(r0));
            break;
          case 3:
            vst1_f32(output, vget_low_f32(r0));
            // BUG FIX: the third element must land at output + 2; the
            // original stored lane 2 at output, clobbering output[0].
            vst1q_lane_f32(output + 2, r0, 2);
            break;
        }
      }
#else
      for (int k = 0; k < elementwise_num; ++k) {
        output[k] = input[k] + bias;
      }
#endif  // __ARM_NEON__
    }
  }
}
// Explicit instantiation of the float CPU kernel.
template class ElementwiseAddKernel<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
#endif
|
round_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
/* Reference kernel: elementwise round-to-nearest (ties away from zero) of
 * input_tensor into output_tensor. num_thread bounds the OpenMP team for
 * the 4-D path. Returns 0 on success, -1 for an unsupported dim_num. */
int ref_round_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* 1, 2 or 3 dims: one flat loop over every element */
    if (input_tensor->dim_num < 4)
    {
        float* input_data = (float*)input_tensor->data;
        float* out_data = (float*)output_tensor->data;
        int total_size = input_tensor->elem_num;

        for (int i = 0; i < total_size; i++)
        {
            /* BUG FIX: the original wrote round(out_data[i]) into
             * input_data[i], i.e. it read the uninitialized output buffer
             * and clobbered the input. Round input into output instead.
             * roundf avoids the float->double->float round trip. */
            out_data[i] = roundf(input_data[i]);
        }
        return 0;
    }
    /* 4 dims: parallelize across channels.
     * NOTE(review): dims[0] (batch) is ignored, so this assumes batch == 1
     * — confirm against the graph executor's tensor layout. */
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        int h = input_tensor->dims[2]; /* consistency: read all dims from input */
        int channels = input_tensor->dims[1];
        int size = h * w;
        int c_step = h * w;

        float* input_data = (float*)input_tensor->data;
        float* out_data = (float*)output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + c_step * q;
            float* dst = out_data + c_step * q;

            for (int i = 0; i < size; i++)
            {
                dst[i] = roundf(src[i]);
            }
        }
        return 0;
    }

    return -1;
}
/* Per-node setup hook: the round op needs no private state. The commented
 * lines show how in-place execution (output aliasing input) would be
 * declared if it were enabled. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
// exec_node->inplace_map[0] = 0;
// exec_node->inplace_map[1] = 0;
// exec_node->inplace_map_num = 1;
return 0;
}
/* Per-node teardown hook: nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
// exec_node->inplace_map_num = 0;
return 0;
}
/* Pre-run hook: no buffers or shapes to prepare ahead of run(). */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Execute the round op: fetch the node's single input and output tensors
 * and invoke the fp32 reference kernel with the graph's thread budget.
 * Returns 0 on success, -1 on kernel failure. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;

    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    /* (removed unused local 'layout'; the kernel is layout-agnostic) */
    int ret = ref_round_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    return (ret != 0) ? -1 : 0;
}
/* Capability score: OPS_SCORE_CANDO marks this generic reference
 * implementation as usable for any round node, at low priority relative
 * to tuned device kernels. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
return OPS_SCORE_CANDO;
}
/* Dispatch table for the round reference op; reshape/postrun are unused. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Register the reference implementation for OP_ROUND with the CPU device.
 * Returns the registrar's status code. */
int register_round_ref_op()
{
return register_builtin_node_ops(OP_ROUND, &hcl_node_ops);
}
/* Remove the OP_ROUND registration (e.g. at module unload). */
int unregister_round_ref_op()
{
return unregister_builtin_node_ops(OP_ROUND, &hcl_node_ops);
}
|
GB_deserialize_from_blob.c | //------------------------------------------------------------------------------
// GB_deserialize_from_blob: uncompress a set of blocks from the blob
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Decompress a single array from a set of compressed blocks in the blob. If
// the input data is mangled, this method is still safe, since it performs the
// bare minimum sanity checks to ensure no out-of-bounds indexing of arrays.
// However, the contents of output array are not fully checked. This step is
// done by GB_deserialize, if requested.
#include "GB.h"
#include "GB_serialize.h"
#include "GB_lz4.h"
#define GB_FREE_ALL                             \
{                                               \
    GB_FREE (&X, X_size) ;                      \
}

// Decompress one array of X_len bytes from the compressed blocks stored in
// the blob, starting at *s_handle. On success returns GrB_SUCCESS with the
// newly allocated array in *X_handle (allocated size in *X_size_handle) and
// *s_handle advanced past the consumed blocks. On a corrupted blob returns
// GrB_INVALID_OBJECT; on allocation failure GrB_OUT_OF_MEMORY. Only bounds
// sanity checks are performed here; the contents of X are validated later
// by GB_deserialize, if requested.
GrB_Info GB_deserialize_from_blob
(
    // output:
    GB_void **X_handle,         // uncompressed output array
    size_t *X_size_handle,      // size of X as allocated
    // input:
    int64_t X_len,              // size of X in bytes
    const GB_void *blob,        // serialized blob of size blob_size
    size_t blob_size,
    int64_t *Sblocks,           // array of size nblocks
    int32_t nblocks,            // # of compressed blocks for this array
    int32_t method,             // compression method used for each block
    // input/output:
    size_t *s_handle,           // where to read from the blob
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (blob != NULL) ;
    ASSERT (s_handle != NULL) ;
    ASSERT (X_handle != NULL) ;
    ASSERT (X_size_handle != NULL) ;
    (*X_handle) = NULL ;
    (*X_size_handle) = 0 ;

    //--------------------------------------------------------------------------
    // parse the method
    //--------------------------------------------------------------------------

    int32_t algo, level ;
    GB_serialize_method (&algo, &level, method) ;

    //--------------------------------------------------------------------------
    // allocate the output array
    //--------------------------------------------------------------------------

    size_t X_size = 0 ;
    GB_void *X = GB_MALLOC (X_len, GB_void, &X_size) ;  // OK
    if (X == NULL)
    {
        // out of memory
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // decompress the blocks from the blob
    //--------------------------------------------------------------------------

    size_t s = (*s_handle) ;
    bool ok = true ;

    if (algo == GxB_COMPRESSION_NONE)
    {

        //----------------------------------------------------------------------
        // no compression; the array is held in a single block
        //----------------------------------------------------------------------

        // ROBUSTNESS FIX: require nblocks to be exactly 1 (the original
        // tested nblocks > 1, which read Sblocks [0] out of bounds when a
        // corrupted blob yields nblocks <= 0).
        if (nblocks != 1 || Sblocks [0] != X_len || s + X_len > blob_size)
        {
            // blob is invalid: guard against an unsafe memcpy
            ok = false ;
        }
        else
        {
            // copy the blob into the array X.  This is now safe and secure.
            // The contents of X are not yet checked, however.
            GB_memcpy (X, blob + s, X_len, nthreads_max) ;
        }

    }
    else if (algo == GxB_COMPRESSION_LZ4 || algo == GxB_COMPRESSION_LZ4HC)
    {

        //----------------------------------------------------------------------
        // LZ4 / LZ4HC compression
        //----------------------------------------------------------------------

        int nthreads = GB_IMIN (nthreads_max, nblocks) ;
        int32_t blockid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic) \
            reduction(&&:ok)
        for (blockid = 0 ; blockid < nblocks ; blockid++)
        {
            // get the start and end of the compressed and uncompressed blocks
            int64_t kstart, kend ;
            GB_PARTITION (kstart, kend, X_len, blockid, nblocks) ;
            int64_t s_start = (blockid == 0) ? 0 : Sblocks [blockid-1] ;
            int64_t s_end = Sblocks [blockid] ;
            size_t s_size = s_end - s_start ;
            size_t d_size = kend - kstart ;
            // ensure s_start, s_end, kstart, and kend are all valid,
            // to avoid accessing arrays out of bounds, if input is corrupted.
            if (kstart < 0 || kend < 0 || s_start < 0 || s_end < 0 ||
                kstart >= kend || s_start >= s_end || s_size > INT32_MAX ||
                s + s_start > blob_size || s + s_end > blob_size ||
                kstart > X_len || kend > X_len || d_size > INT32_MAX)
            {
                // blob is invalid
                ok = false ;
            }
            else
            {
                // uncompress the compressed block of size s_size
                // from blob [s + s_start:s_end-1] into X [kstart:kend-1].
                // This is safe and secure so far.  The contents of X are
                // not yet checked, however.  That step is done in
                // GB_deserialize, if requested.
                const char *src = (const char *) (blob + s + s_start) ;
                char *dst = (char *) (X + kstart) ;
                int src_size = (int) s_size ;
                int dst_size = (int) d_size ;
                // LZ4_decompress_safe returns the decompressed byte count,
                // which must be exactly the expected partition size.
                int u = LZ4_decompress_safe (src, dst, src_size, dst_size) ;
                if (u != dst_size)
                {
                    // blob is invalid
                    ok = false ;
                }
            }
        }
    }
    else
    {
        // unknown compression method
        ok = false ;
    }

    if (!ok)
    {
        // decompression failure; blob is invalid
        GB_FREE_ALL ;
        return (GrB_INVALID_OBJECT) ;
    }

    //--------------------------------------------------------------------------
    // return result: X, its size, and updated index into the blob
    //--------------------------------------------------------------------------

    (*X_handle) = X ;
    (*X_size_handle) = X_size ;
    if (nblocks > 0)
    {
        s += Sblocks [nblocks-1] ;
    }
    (*s_handle) = s ;
    return (GrB_SUCCESS) ;
}
|
workflow.h | #ifndef SRC_WORKFLOW_H
#define SRC_WORKFLOW_H
// #define R_BUILD
#ifdef R_BUILD
#include <Rcpp.h>
#include <RcppEigen.h>
// [[Rcpp::depends(RcppEigen)]]
using namespace Rcpp;
#else
#include <Eigen/Eigen>
#include "List.h"
#endif
#include <iostream>
#include <vector>
#include "Algorithm.h"
#include "Data.h"
#include "Metric.h"
#include "abessOpenMP.h"
#include "path.h"
#include "screening.h"
#include "utilities.h"
typedef Eigen::Triplet<double> triplet;
using namespace Eigen;
using namespace std;
// T1 for y, XTy, XTone
// T2 for beta
// T3 for coef0
// T4 for X
// <Eigen::VectorXd, Eigen::VectorXd, double, Eigen::MatrixXd> for Univariate Dense
// <Eigen::VectorXd, Eigen::VectorXd, double, Eigen::SparseMatrix<double> > for Univariate Sparse
// <Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, Eigen::MatrixXd> for Multivariable Dense
// <Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, Eigen::SparseMatrix<double> > for Multivariable Sparse
template <class T1, class T2, class T3, class T4>
List abessWorkflow(T4 &x, T1 &y, int n, int p, int normalize_type, Eigen::VectorXd weight, int algorithm_type,
int path_type, bool is_warm_start, int ic_type, double ic_coef, int Kfold, Parameters parameters,
int screening_size, Eigen::VectorXi g_index, bool early_stop, int thread, bool sparse_matrix,
Eigen::VectorXi &cv_fold_id, Eigen::VectorXi &A_init,
vector<Algorithm<T1, T2, T3, T4> *> algorithm_list) {
#ifndef R_BUILD
std::srand(123);
#endif
int algorithm_list_size = algorithm_list.size();
int beta_size = algorithm_list[0]->get_beta_size(n, p); // number of candidate param
// data packing
Data<T1, T2, T3, T4> data(x, y, normalize_type, weight, g_index, sparse_matrix, beta_size);
if (algorithm_list[0]->model_type == 1 || algorithm_list[0]->model_type == 5) {
add_weight(data.x, data.y, data.weight);
}
// screening
Eigen::VectorXi screening_A;
if (screening_size >= 0) {
screening_A = screening<T1, T2, T3, T4>(data, algorithm_list, screening_size, beta_size,
parameters.lambda_list(0), A_init);
}
// For CV:
// 1:mask
// 2:warm start save
// 3:group_XTX
Metric<T1, T2, T3, T4> *metric = new Metric<T1, T2, T3, T4>(ic_type, ic_coef, Kfold);
if (Kfold > 1) {
metric->set_cv_train_test_mask(data, data.n, cv_fold_id);
metric->set_cv_init_fit_arg(beta_size, data.M);
// metric->set_cv_initial_model_param(Kfold, data.p);
// metric->set_cv_initial_A(Kfold, data.p);
// metric->set_cv_initial_coef0(Kfold, data.p);
// if (model_type == 1)
// metric->cal_cv_group_XTX(data);
}
// calculate loss for each parameter parameter combination
vector<Result<T2, T3>> result_list(Kfold);
if (path_type == 1) {
#pragma omp parallel for
for (int i = 0; i < Kfold; i++) {
sequential_path_cv<T1, T2, T3, T4>(data, algorithm_list[i], metric, parameters, early_stop, i, A_init,
result_list[i]);
}
} else {
// if (algorithm_type == 5 || algorithm_type == 3)
// {
// double log_lambda_min = log(max(lambda_min, 1e-5));
// double log_lambda_max = log(max(lambda_max, 1e-5));
// result = pgs_path(data, algorithm, metric, s_min, s_max, log_lambda_min, log_lambda_max, powell_path,
// nlambda);
// }
gs_path<T1, T2, T3, T4>(data, algorithm_list, metric, parameters, A_init, result_list);
}
for (int k = 0; k < Kfold; k++) {
algorithm_list[k]->clear_setting();
}
// Get bestmodel index && fit bestmodel
int min_loss_index = 0;
int sequence_size = (parameters.sequence).size();
Eigen::Matrix<T2, Dynamic, 1> beta_matrix(sequence_size, 1);
Eigen::Matrix<T3, Dynamic, 1> coef0_matrix(sequence_size, 1);
Eigen::Matrix<VectorXd, Dynamic, 1> bd_matrix(sequence_size, 1);
Eigen::MatrixXd ic_matrix(sequence_size, 1);
Eigen::MatrixXd test_loss_sum = Eigen::MatrixXd::Zero(sequence_size, 1);
Eigen::MatrixXd train_loss_matrix(sequence_size, 1);
Eigen::MatrixXd effective_number_matrix(sequence_size, 1);
if (Kfold == 1) {
beta_matrix = result_list[0].beta_matrix;
coef0_matrix = result_list[0].coef0_matrix;
ic_matrix = result_list[0].ic_matrix;
train_loss_matrix = result_list[0].train_loss_matrix;
effective_number_matrix = result_list[0].effective_number_matrix;
ic_matrix.col(0).minCoeff(&min_loss_index);
} else {
for (int i = 0; i < Kfold; i++) {
test_loss_sum += result_list[i].test_loss_matrix;
}
test_loss_sum /= ((double)Kfold);
test_loss_sum.col(0).minCoeff(&min_loss_index);
Eigen::VectorXi used_algorithm_index = Eigen::VectorXi::Zero(algorithm_list_size);
// refit on full data
#pragma omp parallel for
for (int ind = 0; ind < sequence_size; ind++) {
int support_size = parameters.sequence(ind).support_size;
double lambda = parameters.sequence(ind).lambda;
int algorithm_index = omp_get_thread_num();
used_algorithm_index(algorithm_index) = 1;
T2 beta_init;
T3 coef0_init;
Eigen::VectorXi A_init; // clear A_init
coef_set_zero(beta_size, data.M, beta_init, coef0_init);
Eigen::VectorXd bd_init = Eigen::VectorXd::Zero(data.g_num);
// warmstart from CV's result
for (int j = 0; j < Kfold; j++) {
beta_init = beta_init + result_list[j].beta_matrix(ind) / Kfold;
coef0_init = coef0_init + result_list[j].coef0_matrix(ind) / Kfold;
bd_init = bd_init + result_list[j].bd_matrix(ind) / Kfold;
}
algorithm_list[algorithm_index]->update_sparsity_level(support_size);
algorithm_list[algorithm_index]->update_lambda_level(lambda);
algorithm_list[algorithm_index]->update_beta_init(beta_init);
algorithm_list[algorithm_index]->update_coef0_init(coef0_init);
algorithm_list[algorithm_index]->update_bd_init(bd_init);
algorithm_list[algorithm_index]->update_A_init(A_init, data.g_num);
algorithm_list[algorithm_index]->fit(data.x, data.y, data.weight, data.g_index, data.g_size, data.n, data.p,
data.g_num);
beta_matrix(ind) = algorithm_list[algorithm_index]->get_beta();
coef0_matrix(ind) = algorithm_list[algorithm_index]->get_coef0();
train_loss_matrix(ind) = algorithm_list[algorithm_index]->get_train_loss();
ic_matrix(ind) = metric->ic(data.n, data.M, data.g_num, algorithm_list[algorithm_index]);
effective_number_matrix(ind) = algorithm_list[algorithm_index]->get_effective_number();
}
for (int i = 0; i < algorithm_list_size; i++) {
if (used_algorithm_index(i) == 1) {
algorithm_list[i]->clear_setting();
}
}
}
// best_fit_result (output)
double best_support_size = parameters.sequence(min_loss_index).support_size;
double best_lambda = parameters.sequence(min_loss_index).lambda;
T2 best_beta;
T3 best_coef0;
double best_train_loss, best_ic, best_test_loss;
best_beta = beta_matrix(min_loss_index);
best_coef0 = coef0_matrix(min_loss_index);
best_train_loss = train_loss_matrix(min_loss_index);
best_ic = ic_matrix(min_loss_index);
best_test_loss = test_loss_sum(min_loss_index);
// Restore best_fit_result for normal
restore_for_normal<T2, T3>(best_beta, best_coef0, beta_matrix, coef0_matrix, sparse_matrix, data.normalize_type,
data.n, data.x_mean, data.y_mean, data.x_norm);
// List result;
List out_result;
#ifdef R_BUILD
out_result = List::create(
Named("beta") = best_beta, Named("coef0") = best_coef0, Named("train_loss") = best_train_loss,
Named("ic") = best_ic, Named("lambda") = best_lambda, Named("beta_all") = beta_matrix,
Named("coef0_all") = coef0_matrix, Named("train_loss_all") = train_loss_matrix, Named("ic_all") = ic_matrix,
Named("effective_number_all") = effective_number_matrix, Named("test_loss_all") = test_loss_sum);
if (path_type == 2) {
out_result.push_back(parameters.support_size_list, "sequence");
}
#else
out_result.add("beta", best_beta);
out_result.add("coef0", best_coef0);
out_result.add("train_loss", best_train_loss);
out_result.add("test_loss", best_test_loss);
out_result.add("ic", best_ic);
out_result.add("lambda", best_lambda);
// out_result.add("beta_all", beta_matrix);
// out_result.add("coef0_all", coef0_matrix);
// out_result.add("train_loss_all", train_loss_matrix);
// out_result.add("ic_all", ic_matrix);
// out_result.add("test_loss_all", test_loss_sum);
#endif
// Restore best_fit_result for screening
if (screening_size >= 0) {
T2 beta_screening_A;
T2 beta;
T3 coef0;
beta_size = algorithm_list[0]->get_beta_size(n, p);
coef_set_zero(beta_size, data.M, beta, coef0);
#ifndef R_BUILD
out_result.get_value_by_name("beta", beta_screening_A);
slice_restore(beta_screening_A, screening_A, beta);
out_result.add("beta", beta);
out_result.add("screening_A", screening_A);
#else
beta_screening_A = out_result["beta"];
slice_restore(beta_screening_A, screening_A, beta);
out_result["beta"] = beta;
out_result.push_back(screening_A, "screening_A");
#endif
}
delete metric;
return out_result;
}
#endif // SRC_WORKFLOW_H
|
rotlet_direct.c | #include "mex.h"
#include "math.h"
#define X prhs[0] // Source locations
#define F prhs[1] // Source strengths
#define U plhs[0] // Output
#ifndef VERBOSE
#define VERBOSE 0
#endif
#define PI 3.141592653589793
/* c = a x b : 3-component vector cross product.
 *
 * Fix: a bare C99 `inline` function (no `static`, no `extern` definition
 * elsewhere) provides no external definition; if the compiler chooses not
 * to inline a call, the program fails to link. `static inline` makes the
 * definition self-contained in this translation unit.
 * Inputs are const-qualified (backward compatible for existing callers). */
static inline void cross(const double *a, const double *b, double *c)
{
    c[0] = a[1]*b[2] - a[2]*b[1];
    c[1] = a[2]*b[0] - a[0]*b[2];
    c[2] = a[0]*b[1] - a[1]*b[0];
}
/* no input checking is done */
/* MEX gateway: direct N^2 summation of the free-space rotlet kernel.
 *
 * Inputs  (prhs): X = N-by-3 source locations, F = N-by-3 source strengths.
 * Output  (plhs): U = N-by-3 field values, u_m = sum_{n != m} (f_n x r_mn)/|r_mn|^3.
 *
 * Note: no input checking is done (documented contract of this MEX file).
 *
 * Fix: the self-interaction test (m == n) previously ran AFTER cross() and
 * 1/(ri*ri*ri) were evaluated with ri == 0, computing an infinity that was
 * then discarded. The skip is now hoisted to the top of the inner loop, so
 * the singular term is never formed. */
void mexFunction(int nlhs, mxArray *plhs[],
                 int nrhs, const mxArray *prhs[] )
{
    // input dims (column-major: component k of point i lives at [i + k*N])
    const int N = mxGetM(X);
    const double* restrict x = mxGetPr(X);
    const double* restrict f = mxGetPr(F);
    U = mxCreateDoubleMatrix(N, 3, mxREAL);
    double* restrict u = mxGetPr(U);
    if(VERBOSE)
        mexPrintf("[FS Rotlet Direct ] MEX N=%d ",N);
    // call kernel: each target point m is independent, so parallelize over m
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for(int m=0; m<N; m++)
    {
        double p[] = {0,0,0};
        double fxr[3];
        for(int n = 0; n<N; n++){
            if(m==n)       /* skip singular self-interaction before forming r */
                continue;
            double fn[] = {f[n], f[n+N], f[n+2*N]};
            double r[]  = {x[m]-x[n], x[m+N]-x[n+N], x[m+2*N]-x[n+2*N]};
            cross(fn,r,fxr);
            double ri  = sqrt(r[0]*r[0]+r[1]*r[1]+r[2]*r[2]);
            double ri3 = 1.0/(ri*ri*ri);
            p[0] += ri3*fxr[0];
            p[1] += ri3*fxr[1];
            p[2] += ri3*fxr[2];
        }
        u[m    ] = p[0];
        u[m+  N] = p[1];
        u[m+2*N] = p[2];
    }
}
|
blas_dh.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_Euclid.h"
/* #include "blas_dh.h" */
#undef __FUNC__
#define __FUNC__ "matvec_euclid_seq"
/* Sequential CSR matrix-vector product: y = A*x, where A is stored in
 * compressed-sparse-row form (rp = row pointers, cval = column indices,
 * aval = values). Errors out when run with more than one MPI task. */
void matvec_euclid_seq(HYPRE_Int n, HYPRE_Int *rp, HYPRE_Int *cval, HYPRE_Real *aval, HYPRE_Real *x, HYPRE_Real *y)
{
  START_FUNC_DH
  HYPRE_Int row, idx;
  HYPRE_Int start, stop, col;
  HYPRE_Real rowSum;

  if (np_dh > 1) SET_V_ERROR("only for sequential case!\n");

#ifdef USING_OPENMP_DH
#pragma omp parallel private(idx, col, rowSum, start, stop) \
                     default(shared) \
                     firstprivate(n, rp, cval, aval, x, y)
#endif
  {
#ifdef USING_OPENMP_DH
#pragma omp for schedule(static)
#endif
    for (row = 0; row < n; ++row) {
      rowSum = 0.0;
      start = rp[row];
      stop  = rp[row + 1];
      /* accumulate the nonzeros of this row */
      for (idx = start; idx < stop; ++idx) {
        col = cval[idx];
        rowSum += aval[idx] * x[col];
      }
      y[row] = rowSum;
    }
  }
  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "Axpy"
/* y := alpha*x + y (classic daxpy), vectors of length n. */
void Axpy(HYPRE_Int n, HYPRE_Real alpha, HYPRE_Real *x, HYPRE_Real *y)
{
  START_FUNC_DH
  HYPRE_Int k;

#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(alpha, x, y) \
                         private(k)
#endif
  for (k = 0; k < n; ++k) y[k] += alpha * x[k];
  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "CopyVec"
/* yOUT := xIN, element-wise copy of vectors of length n. */
void CopyVec(HYPRE_Int n, HYPRE_Real *xIN, HYPRE_Real *yOUT)
{
  START_FUNC_DH
  HYPRE_Int k;

#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(yOUT, xIN) \
                         private(k)
#endif
  for (k = 0; k < n; ++k) yOUT[k] = xIN[k];
  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "ScaleVec"
/* x := alpha*x, in-place scaling of a vector of length n. */
void ScaleVec(HYPRE_Int n, HYPRE_Real alpha, HYPRE_Real *x)
{
  START_FUNC_DH
  HYPRE_Int k;

#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(alpha, x) \
                         private(k)
#endif
  for (k = 0; k < n; ++k) x[k] = x[k] * alpha;
  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "InnerProd"
/* Inner product <x,y> over the local length-n vectors; when running on
 * more than one MPI task the partial sums are reduced across comm_dh. */
HYPRE_Real InnerProd(HYPRE_Int n, HYPRE_Real *x, HYPRE_Real *y)
{
  START_FUNC_DH
  HYPRE_Real result, local_result = 0.0;
  HYPRE_Int k;

#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(x, y) \
                         private(k) \
                         reduction(+:local_result)
#endif
  for (k = 0; k < n; ++k) local_result += x[k] * y[k];

  /* single-task default; the Allreduce overwrites it in the parallel case */
  result = local_result;
  if (np_dh > 1) {
    hypre_MPI_Allreduce(&local_result, &result, 1, hypre_MPI_REAL, hypre_MPI_SUM, comm_dh);
  }
  END_FUNC_VAL(result)
}
#undef __FUNC__
#define __FUNC__ "Norm2"
/* Euclidean (2-)norm of x: the squared partial sums are reduced across
 * MPI tasks (when np_dh > 1) before the final square root is taken. */
HYPRE_Real Norm2(HYPRE_Int n, HYPRE_Real *x)
{
  START_FUNC_DH
  HYPRE_Real result, local_result = 0.0;
  HYPRE_Int k;

#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(x) \
                         private(k) \
                         reduction(+:local_result)
#endif
  for (k = 0; k < n; ++k) local_result += x[k] * x[k];

  /* single-task default; the Allreduce overwrites it in the parallel case */
  result = local_result;
  if (np_dh > 1) {
    hypre_MPI_Allreduce(&local_result, &result, 1, hypre_MPI_REAL, hypre_MPI_SUM, comm_dh);
  }
  result = sqrt(result);
  END_FUNC_VAL(result)
}
|
WaveFunctionComponent.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
// Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory
// Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
// Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
// Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory
// Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H
#define QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H
#include "Message/Communicate.h"
#include "Configuration.h"
#include "Particle/ParticleSet.h"
#include "Particle/VirtualParticleSet.h"
#include "Particle/DistanceTableData.h"
#include "OhmmsData/RecordProperty.h"
#include "QMCWaveFunctions/OrbitalSetTraits.h"
#include "Particle/MCWalkerConfiguration.h"
#include "type_traits/template_types.hpp"
#ifdef QMC_CUDA
#include "type_traits/CUDATypes.h"
#endif
/**@file WaveFunctionComponent.h
*@brief Declaration of WaveFunctionComponent
*/
namespace qmcplusplus
{
#ifdef QMC_CUDA
/** Descriptor for one non-local pseudopotential evaluation job (CUDA path). */
struct NLjob
{
int walker;        // index of the walker this job belongs to
int elec;          // index of the electron being evaluated
int numQuadPoints; // number of quadrature points for this (walker, electron) pair
NLjob(int w, int e, int n) : walker(w), elec(e), numQuadPoints(n) {}
};
#endif
///forward declaration of WaveFunctionComponent
class WaveFunctionComponent;
///forward declaration of DiffWaveFunctionComponent
class DiffWaveFunctionComponent;
typedef WaveFunctionComponent* WaveFunctionComponentPtr;
typedef DiffWaveFunctionComponent* DiffWaveFunctionComponentPtr;
/**@defgroup WaveFunctionComponent group
* @brief Classes which constitute a many-body trial wave function
*
* A many-body trial wave function is
* \f[
\Psi(\{ {\bf R}\}) = \prod_i \psi_{i}(\{ {\bf R}\}),
* \f]
* where \f$\Psi\f$s are represented by
* the derived classes from WaveFunctionComponent.
*/
/** @ingroup WaveFunctionComponent
* @brief An abstract class for a component of a many-body trial wave function
*
* mw_ prefix is a function name signature indicating it is for handling a batch of WaveFunctionComponent objects
* which are required to be base class pointers of the same derived class type.
* all the mw_ routines must be implemented in a way either stateless or maintains states of every walker.
*/
struct WaveFunctionComponent : public QMCTraits
{
/** enum for a update mode */
enum
{
ORB_PBYP_RATIO, /*!< particle-by-particle ratio only */
ORB_PBYP_ALL, /*!< particle-by-particle, update Value-Gradient-Laplacian */
ORB_PBYP_PARTIAL, /*!< particle-by-particle, update Value and Gradient */
ORB_WALKER, /*!< walker update */
ORB_ALLWALKER /*!< all walkers update */
};
typedef ParticleAttrib<ValueType> ValueVectorType;
typedef ParticleAttrib<GradType> GradVectorType;
typedef ParticleSet::Walker_t Walker_t;
typedef Walker_t::WFBuffer_t WFBufferType;
typedef Walker_t::Buffer_t BufferType;
typedef OrbitalSetTraits<RealType>::ValueMatrix_t RealMatrix_t;
typedef OrbitalSetTraits<ValueType>::ValueMatrix_t ValueMatrix_t;
typedef OrbitalSetTraits<ValueType>::GradMatrix_t GradMatrix_t;
typedef OrbitalSetTraits<ValueType>::HessType HessType;
typedef OrbitalSetTraits<ValueType>::HessVector_t HessVector_t;
// the value type for log(psi)
using LogValueType = std::complex<QTFull::RealType>;
// the value type for psi(r')/psi(r)
using PsiValueType = QTFull::ValueType;
/** flag to set the optimization mode */
bool IsOptimizing;
/** boolean to set optimization
*
* If true, this object is actively modified during optimization
*/
bool Optimizable;
/** true, if this component is fermionic */
bool is_fermionic;
/** current update mode */
int UpdateMode;
/** current \f$\log\phi \f$
*/
LogValueType LogValue;
/** Pointer to the differential WaveFunctionComponent of this object
*
* If dPsi=0, this WaveFunctionComponent is constant with respect to the optimizable variables
*/
DiffWaveFunctionComponentPtr dPsi;
/** A vector for \f$ \frac{\partial \nabla \log\phi}{\partial \alpha} \f$
*/
GradVectorType dLogPsi;
/** A vector for \f$ \frac{\partial \nabla^2 \log\phi}{\partial \alpha} \f$
*/
ValueVectorType d2LogPsi;
/** Name of the class derived from WaveFunctionComponent
*/
std::string ClassName;
///list of variables this WaveFunctionComponent handles
opt_variables_type myVars;
///Bytes in WFBuffer
size_t Bytes_in_WFBuffer;
/// default constructor
WaveFunctionComponent();
//WaveFunctionComponent(const WaveFunctionComponent& old);
///default destructor
virtual ~WaveFunctionComponent() {}
inline void setOptimizable(bool optimizeit) { Optimizable = optimizeit; }
///assign a differential WaveFunctionComponent
virtual void setDiffOrbital(DiffWaveFunctionComponentPtr d);
///assembles the full value
PsiValueType getValue() const { return LogToValue<PsiValueType>::convert(LogValue); }
/** check in optimizable parameters
* @param active a super set of optimizable variables
*
* Add the paramemters this WaveFunctionComponent manage to active.
*/
virtual void checkInVariables(opt_variables_type& active) = 0;
/** check out optimizable variables
*
* Update myVars index map
*/
virtual void checkOutVariables(const opt_variables_type& active) = 0;
/** reset the parameters during optimizations
*/
virtual void resetParameters(const opt_variables_type& active) = 0;
/** print the state, e.g., optimizables */
virtual void reportStatus(std::ostream& os) = 0;
/** reset properties, e.g., distance tables, for a new target ParticleSet
* @param P ParticleSet
*/
virtual void resetTargetParticleSet(ParticleSet& P) = 0;
/** evaluate the value of the WaveFunctionComponent from scratch
* @param P active ParticleSet
* @param G Gradients, \f$\nabla\ln\Psi\f$
* @param L Laplacians, \f$\nabla^2\ln\Psi\f$
* @return the log value
*
* Mainly for walker-by-walker move. The initial stage of particle-by-particle
* move also uses this.
*/
virtual LogValueType evaluateLog(ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L) = 0;
/** evaluate from scratch the same type WaveFunctionComponent of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param G_list the list of Gradients pointers in a walker batch, \f$\nabla\ln\Psi\f$
* @param L_list the list of Laplacians pointers in a walker batch, \f$\nabla^2\ln\Psi\f$
* @param values the log WF values of walkers in a batch
*/
virtual void mw_evaluateLog(const std::vector<WaveFunctionComponent*>& WFC_list,
const std::vector<ParticleSet*>& P_list,
const std::vector<ParticleSet::ParticleGradient_t*>& G_list,
const std::vector<ParticleSet::ParticleLaplacian_t*>& L_list)
{
// Default batched implementation: evaluate each walker's component
// independently, one OpenMP thread per walker. Derived classes may
// override with a truly vectorized implementation.
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
WFC_list[iw]->evaluateLog(*P_list[iw], *G_list[iw], *L_list[iw]);
}
/** recompute the value of the WaveFunctionComponents which require critical accuracy.
* needed for Slater Determinants but not needed for most types of WaveFunctionComponents
*/
virtual void recompute(ParticleSet& P) {}
// virtual void evaluateHessian(ParticleSet& P, IndexType iat, HessType& grad_grad_psi)
// {
// APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented");
// }
virtual void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi_all)
{
APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented in " + ClassName + " class.");
}
/** return the current gradient for the iat-th particle
* @param P quantum particle set
* @param iat particle index
* @return the gradient of the iat-th particle
*/
virtual GradType evalGrad(ParticleSet& P, int iat)
{
// Fix: the abort message previously named a non-existent method
// ("evalGradient"), which misdirected debugging; it now names evalGrad.
APP_ABORT("WaveFunctionComponent::evalGrad is not implemented in " + ClassName + " class.");
return GradType();
}
/** return the current spin gradient for the iat-th particle
* Default implementation assumes that WaveFunctionComponent does not explicitly depend on Spin.
* @param P quantum particle set
* @param iat particle index
* @return the spin gradient of the iat-th particle
*/
virtual GradType evalGradWithSpin(ParticleSet& P, int iat, LogValueType& spingrad) { return evalGrad(P, iat); }
/** compute the current gradients for the iat-th particle of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param grad_now the list of gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_evalGrad(const std::vector<WaveFunctionComponent*>& WFC_list,
const std::vector<ParticleSet*>& P_list,
int iat,
std::vector<GradType>& grad_now)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
grad_now[iw] = WFC_list[iw]->evalGrad(*P_list[iw], iat);
}
/** compute the current gradients for the iat-th particle of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param grad_now the list of gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_evalGrad(const std::vector<std::reference_wrapper<WaveFunctionComponent>>& WFC_list,
const std::vector<std::reference_wrapper<ParticleSet>>& P_list,
int iat,
std::vector<GradType>& grad_now)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
grad_now[iw] = WFC_list[iw].get().evalGrad(P_list[iw].get(), iat);
}
/** return the logarithmic gradient for the iat-th particle
* of the source particleset
* @param P quantum particle set
* @param iat particle index
* @return the gradient of the iat-th particle
*/
virtual GradType evalGradSource(ParticleSet& P, ParticleSet& source, int iat)
{
// unit_test_hamiltonian calls this function incorrectly; do not abort for now
// APP_ABORT("WaveFunctionComponent::evalGradSource is not implemented");
return GradType();
}
/** Adds the gradient w.r.t. the iat-th particle of the
* source particleset (ions) of the logarithmic gradient
* and laplacian w.r.t. the target particleset (electrons).
* @param P quantum particle set (electrons)
* @param source classical particle set (ions)
* @param iat particle index of source (ion)
* @param the ion gradient of the electron gradient
* @param the ion gradient of the electron laplacian.
* @return the log gradient of psi w.r.t. the source particle iat
*/
virtual GradType evalGradSource(ParticleSet& P,
ParticleSet& source,
int iat,
TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad,
TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad)
{
return GradType();
}
/** evaluate the ratio of the new to old WaveFunctionComponent value and the new gradient
* @param P the active ParticleSet
* @param iat the index of a particle
* @param grad_iat Gradient for the active particle
*/
virtual PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
APP_ABORT("WaveFunctionComponent::ratioGrad is not implemented in " + ClassName + " class.");
// Fix: return the declared PsiValueType rather than ValueType, which
// relied on an implicit conversion and mismatched the signature.
return PsiValueType();
}
/** evaluate the ratio of the new to old WaveFunctionComponent value and the new spin gradient
* Default implementation assumes that WaveFunctionComponent does not explicitly depend on Spin.
* @param P the active ParticleSet
* @param iat the index of a particle
* @param grad_iat realspace gradient for the active particle
* @param spingrad_iat spin gradient for the active particle
*/
virtual PsiValueType ratioGradWithSpin(ParticleSet& P, int iat, GradType& grad_iat, LogValueType& spingrad_iat)
{
return ratioGrad(P, iat, grad_iat);
}
/** compute the ratio of the new to old WaveFunctionComponent value and the new gradient of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
* @param grad_now the list of new gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_ratioGrad(const std::vector<WaveFunctionComponent*>& WFC_list,
const std::vector<ParticleSet*>& P_list,
int iat,
std::vector<PsiValueType>& ratios,
std::vector<GradType>& grad_new)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
ratios[iw] = WFC_list[iw]->ratioGrad(*P_list[iw], iat, grad_new[iw]);
}
/** compute the ratio of the new to old WaveFunctionComponent value and the new gradient of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
* @param grad_now the list of new gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_ratioGrad(const RefVector<WaveFunctionComponent>& WFC_list,
const RefVector<ParticleSet>& P_list,
int iat,
std::vector<PsiValueType>& ratios,
std::vector<GradType>& grad_new)
{
//#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
ratios[iw] = WFC_list[iw].get().ratioGrad(P_list[iw], iat, grad_new[iw]);
}
/** a move for iat-th particle is accepted. Update the current content.
* @param P target ParticleSet
* @param iat index of the particle whose new position was proposed
*/
virtual void acceptMove(ParticleSet& P, int iat) = 0;
/** moves of the iat-th particle on some walkers in a batch is accepted. Update the current content.
* Note that all the lists only include accepted walkers.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
*/
virtual void mw_acceptMove(const std::vector<WaveFunctionComponent*>& WFC_list,
const std::vector<ParticleSet*>& P_list,
int iat)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
WFC_list[iw]->acceptMove(*P_list[iw], iat);
}
/** complete all the delayed updates, must be called after each substep or step during pbyp move
*/
virtual void completeUpdates() {}
/** complete all the delayed updates for all the walkers in a batch
* must be called after each substep or step during pbyp move
*/
virtual void mw_completeUpdates(const std::vector<WaveFunctionComponent*>& WFC_list)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
WFC_list[iw]->completeUpdates();
}
/** If a move for iat-th particle is rejected, restore to the content.
* @param iat index of the particle whose new position was proposed
*
* Ye: hopefully we can gradually move away from restore
*/
virtual void restore(int iat) = 0;
/** If a move for iat-th particle on some walkers in a batch is rejected, restore their contents
* Note that all the lists only include rejected walkers.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param iat index of the particle whose new position was proposed
*
* Ye: hopefully we can gradually move away from restore
*/
virtual void mw_restore(const std::vector<WaveFunctionComponent*>& WFC_list, int iat)
{
//#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
WFC_list[iw]->restore(iat);
}
/** evaluate the ratio of the new to old WaveFunctionComponent value
* @param P the active ParticleSet
* @param iat the index of a particle
* @return \f$ \psi( \{ {\bf R}^{'} \} )/ \psi( \{ {\bf R}\})\f$
*
* Specialized for particle-by-particle move
*/
virtual PsiValueType ratio(ParticleSet& P, int iat) = 0;
/** compute the ratio of the new to old WaveFunctionComponent value of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
*/
virtual void mw_calcRatio(const std::vector<WaveFunctionComponent*>& WFC_list,
const std::vector<ParticleSet*>& P_list,
int iat,
std::vector<PsiValueType>& ratios)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
ratios[iw] = WFC_list[iw]->ratio(*P_list[iw], iat);
}
/** compute the ratio of the new to old WaveFunctionComponent value of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
*/
virtual void mw_calcRatio(const RefVector<WaveFunctionComponent>& WFC_list,
const RefVector<ParticleSet>& P_list,
int iat,
std::vector<PsiValueType>& ratios)
{
//#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
ratios[iw] = WFC_list[iw].get().ratio(P_list[iw], iat);
}
/** For particle-by-particle move. Requests space in the buffer
* based on the data type sizes of the objects in this class.
* @param P particle set
* @param buf Anonymous storage
*/
virtual void registerData(ParticleSet& P, WFBufferType& buf) = 0;
/** For particle-by-particle move. Requests space in the buffer
* based on the data type sizes of the objects in this class.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param buf_list Anonymous storage
*/
virtual void mw_registerData(const std::vector<WaveFunctionComponent*>& WFC_list,
const std::vector<ParticleSet*>& P_list,
const std::vector<WFBufferType*>& buf_list)
{
// We can't make this static but we can use a lambda with no capture to
// restrict access to *this scope
auto registerComponentData = [](WaveFunctionComponent& wfc, ParticleSet& pset, WFBufferType& wfb) {
wfc.registerData(pset, wfb);
};
for (int iw = 0; iw < WFC_list.size(); iw++)
registerComponentData(*(WFC_list[iw]), *(P_list[iw]), *(buf_list[iw]));
}
/** For particle-by-particle move. Put the objects of this class
* in the walker buffer or forward the memory cursor.
* @param P particle set
* @param buf Anonymous storage
* @param fromscratch request recomputing the precision critical
* pieces of wavefunction from scratch
* @return log value of the wavefunction.
*/
virtual LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false) = 0;
/** For particle-by-particle move. Put the objects of this class
* in the walker buffer or forward the memory cursor.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param buf_list Anonymous storage
* @param values the log WF values of walkers in a batch
* @param fromscratch request recomputing the precision critical
* pieces of wavefunction from scratch
*/
virtual void mw_updateBuffer(const RefVector<WaveFunctionComponent>& WFC_list,
const RefVector<ParticleSet>& P_list,
const RefVector<WFBufferType>& buf_list,
bool fromscratch = false)
{
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
WFC_list[iw].get().updateBuffer(P_list[iw], buf_list[iw], fromscratch);
}
/** For particle-by-particle move. Copy data or attach memory
* from a walker buffer to the objects of this class.
* The log value, P.G and P.L contribution from the objects
* of this class are also added.
* @param P particle set
* @param buf Anonymous storage
*/
virtual void copyFromBuffer(ParticleSet& P, WFBufferType& buf) = 0;
/** For particle-by-particle move. Copy data or attach memory
* from a walker buffer to the objects of this class.
* @param P particle set
* @param buf Anonymous storage
*/
virtual void mw_copyFromBuffer(const RefVector<WaveFunctionComponent>& wfc_list,
const RefVector<ParticleSet>& p_list,
const RefVector<WFBufferType>& buf_list)
{
#pragma omp parallel for
for (int iw = 0; iw < wfc_list.size(); iw++)
wfc_list[iw].get().copyFromBuffer(p_list[iw], buf_list[iw]);
}
/** make clone
* @param tqp target Quantum ParticleSet
* @param deepcopy if true, make a deep copy
*
* If not true, return a proxy class
*/
virtual WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;
/** Intended as a handle to break
*
*
*/
//virtual WaveFunctionComponentPtr makeThrScope(std::vector<std::pair<int,int>>& ptcl_group_indexes) const = 0;
/** Return the Chiesa kinetic energy correction
*/
virtual RealType KECorrection();
/** Compute derivatives of the wavefunction with respect to the optimizable
* parameters.
* @param P particle set
* @param optvars optimizable parameters
* @param dlogpsi array of derivatives of the log of the wavefunction
* @param dhpsioverpsi array of derivatives of the Laplacian of the wavefunction divided by the wavefunction.
* Note that this does not use the Laplacian of the log of the wavefunction, as in evaluateLog.
* Also the factor of -1/2 from the kinetic energy must be included here. The 1/m
* factor is applied in TrialWaveFunction.
*/
virtual void evaluateDerivatives(ParticleSet& P,
const opt_variables_type& optvars,
std::vector<ValueType>& dlogpsi,
std::vector<ValueType>& dhpsioverpsi);
/** Compute derivatives of rhe wavefunction with respect to the optimizable
* parameters
* @param P particle set
* @param optvars optimizable parameters
* @param dlogpsi array of derivatives of the log of the wavefunction
* Note: this function differs from the evaluateDerivatives function in the way that it only computes
* the derivative of the log of the wavefunction.
*/
virtual void evaluateDerivativesWF(ParticleSet& P,
const opt_variables_type& optvars,
std::vector<ValueType>& dlogpsi);
virtual void multiplyDerivsByOrbR(std::vector<ValueType>& dlogpsi)
{
// Scale the managed log-derivative entries by the real part of this
// component's value (recovered from LogValue via LogToValue).
RealType myrat = std::real(LogToValue<PsiValueType>::convert(LogValue));
for (int j = 0; j < myVars.size(); j++)
{
int loc = myVars.where(j); // map local parameter j to its global index in dlogpsi
dlogpsi[loc] *= myrat;
}
}
/** Calculates the derivatives of \f$ \grad(\textrm{log}(\psif)) \f$ with respect to
the optimizable parameters, and the dot product of this is then
performed with the passed-in G_in gradient vector. This object is then
returned as dgradlogpsi.
*/
virtual void evaluateGradDerivatives(const ParticleSet::ParticleGradient_t& G_in, std::vector<ValueType>& dgradlogpsi)
{
APP_ABORT("Need specialization of WaveFunctionComponent::evaluateGradDerivatives in " + ClassName + " class.\n");
}
virtual void finalizeOptimization() {}
/** evaluate the ratios of one virtual move with respect to all the particles
* @param P reference particleset
* @param ratios \f$ ratios[i]=\{{\bf R}\}\rightarrow {r_0,\cdots,r_i^p=pos,\cdots,r_{N-1}}\f$
*/
virtual void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);
/** evaluate ratios to evaluate the non-local PP
* @param VP VirtualParticleSet
* @param ratios ratios with new positions VP.R[k] the VP.refPtcl
*/
virtual void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios);
/** evaluate ratios to evaluate the non-local PP
* @param VP VirtualParticleSet
* @param ratios ratios with new positions VP.R[k] the VP.refPtcl
* @param dratios \f$\partial_{\alpha}(\ln \Psi ({\bf R}^{\prime}) - \ln \Psi ({\bf R})) \f$
*/
virtual void evaluateDerivRatios(VirtualParticleSet& VP,
const opt_variables_type& optvars,
std::vector<ValueType>& ratios,
Matrix<ValueType>& dratios);
/////////////////////////////////////////////////////
// Functions for vectorized evaluation and updates //
/////////////////////////////////////////////////////
#ifdef QMC_CUDA
using CTS = CUDAGlobalTypes;
virtual void freeGPUmem() {}
virtual void recompute(MCWalkerConfiguration& W, bool firstTime) {}
virtual void reserve(PointerPool<gpu::device_vector<CTS::ValueType>>& pool, int kblocksize) {}
/** Evaluate the log of the WF for all walkers
* @param walkers vector of all walkers
* @param logPsi output vector of log(psi)
*/
virtual void addLog(MCWalkerConfiguration& W, std::vector<RealType>& logPsi)
{
APP_ABORT("Need specialization of WaveFunctionComponent::addLog for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
/** Evaluate the wave-function ratio w.r.t. moving particle iat
* for all walkers
* @param walkers vector of all walkers
* @param iat particle which is moving
* @param psi_ratios output vector with psi_new/psi_old
*/
virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios)
{
APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// Returns the WF ratio and gradient w.r.t. iat for each walker
// in the respective vectors
// CUDA stub (ratio + gradient overload): aborts unless overridden.
virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad)
{
APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// CUDA stub (ratio + gradient + laplacian overload): aborts unless overridden.
virtual void ratio(MCWalkerConfiguration& W,
int iat,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl)
{
APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// CUDA stub: aborts at runtime unless a derived component overrides it.
virtual void calcRatio(MCWalkerConfiguration& W,
int iat,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl)
{
APP_ABORT("Need specialization of WaveFunctionComponent::calcRatio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// CUDA stub: aborts at runtime unless a derived component overrides it.
virtual void addRatio(MCWalkerConfiguration& W,
int iat,
int k,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl)
{
APP_ABORT("Need specialization of WaveFunctionComponent::addRatio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// CUDA stub (per-walker-list overload): aborts unless overridden.
virtual void ratio(std::vector<Walker_t*>& walkers,
std::vector<int>& iatList,
std::vector<PosType>& rNew,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl)
{
APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// CUDA stub: aborts at runtime unless a derived component overrides it.
virtual void addGradient(MCWalkerConfiguration& W, int iat, std::vector<GradType>& grad)
{
APP_ABORT("Need specialization of WaveFunctionComponent::addGradient for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// CUDA stub: aborts at runtime unless a derived component overrides it.
virtual void calcGradient(MCWalkerConfiguration& W, int iat, int k, std::vector<GradType>& grad)
{
APP_ABORT("Need specialization of WaveFunctionComponent::calcGradient for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// CUDA stub: aborts at runtime unless a derived component overrides it.
virtual void gradLapl(MCWalkerConfiguration& W, GradMatrix_t& grads, ValueMatrix_t& lapl)
{
APP_ABORT("Need specialization of WaveFunctionComponent::gradLapl for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// CUDA stub: aborts at runtime unless a derived component overrides it.
virtual void det_lookahead(MCWalkerConfiguration& W,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl,
int iat,
int k,
int kd,
int nw)
{
APP_ABORT("Need specialization of WaveFunctionComponent::det_lookahead for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// CUDA stub: aborts at runtime unless a derived component overrides it.
virtual void update(MCWalkerConfiguration* W, std::vector<Walker_t*>& walkers, int iat, std::vector<bool>* acc, int k)
{
APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// CUDA stub (walker-list overload): aborts unless overridden.
virtual void update(const std::vector<Walker_t*>& walkers, const std::vector<int>& iatList)
{
APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// CUDA stub: aborts at runtime unless a derived component overrides it.
virtual void NLratios(MCWalkerConfiguration& W,
std::vector<NLjob>& jobList,
std::vector<PosType>& quadPoints,
std::vector<ValueType>& psi_ratios)
{
APP_ABORT("Need specialization of WaveFunctionComponent::NLRatios for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// CUDA stub (device-vector overload): aborts unless overridden.
virtual void NLratios(MCWalkerConfiguration& W,
gpu::device_vector<CUDA_PRECISION*>& Rlist,
gpu::device_vector<int*>& ElecList,
gpu::device_vector<int>& NumCoreElecs,
gpu::device_vector<CUDA_PRECISION*>& QuadPosList,
gpu::device_vector<CUDA_PRECISION*>& RatioList,
int numQuadPoints)
{
APP_ABORT("Need specialization of WaveFunctionComponent::NLRatios for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// CUDA stub: aborts at runtime unless a derived component overrides it.
virtual void evaluateDerivatives(MCWalkerConfiguration& W,
const opt_variables_type& optvars,
RealMatrix_t& dgrad_logpsi,
RealMatrix_t& dhpsi_over_psi)
{
APP_ABORT("Need specialization of WaveFunctionComponent::evaluateDerivatives for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
#endif
};
} // namespace qmcplusplus
#endif
|
matrix.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "base.h"
#include "matrix.h"
#include "util.h"
#include "timer.h"
#include "splatt_lapack.h"
#include <math.h>
#ifdef SPLATT_USE_MPI
#include <mpi.h>
#else
/* define MPI_Comm to make life easier without MPI */
typedef int MPI_Comm;
#endif
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Normalize each column of A and store the column l_2 norms in 'lambda'.
*        If SPLATT_USE_MPI is defined, it will aggregate the norms over MPI
*        communicator 'comm'. 'comm' is not touched if SPLATT_USE_MPI is not
*        defined.
*
* @param[out] A The matrix whose columns we normalize (row-major).
* @param[out] lambda The column norms.
* @param comm The MPI communicator.
*/
static void p_mat_2norm(
  matrix_t * const A,
  val_t * const restrict lambda,
  MPI_Comm comm)
{
  idx_t const I = A->I;
  idx_t const J = A->J;
  val_t * const restrict vals = A->vals;

  #pragma omp parallel
  {
    /* per-thread partial sums of squares, one per column */
    val_t * restrict mylambda = splatt_malloc(J * sizeof(*mylambda));
    for(idx_t j=0; j < J; ++j) {
      mylambda[j] = 0;
    }

    /* A is row-major: vals[j + (i*J)] is entry (i,j) */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        mylambda[j] += vals[j + (i*J)] * vals[j + (i*J)];
      }
    }

    /* do reduction on partial sums */
    thread_allreduce(mylambda, J, SPLATT_REDUCE_SUM);

    #pragma omp master
    {
#ifdef SPLATT_USE_MPI
      /* now do an MPI reduction to get the global lambda */
      timer_start(&timers[TIMER_MPI_NORM]);
      timer_start(&timers[TIMER_MPI_COMM]);
      MPI_Allreduce(mylambda, lambda, J, SPLATT_MPI_VAL, MPI_SUM, comm);
      timer_stop(&timers[TIMER_MPI_COMM]);
      timer_stop(&timers[TIMER_MPI_NORM]);
#else
      for(idx_t j=0; j < J; ++j) {
        lambda[j] = mylambda[j];
      }
#endif

      /* compute the final norms */
      for(idx_t j=0; j < J; ++j) {
        lambda[j] = sqrt(lambda[j]);
      }
    }
    /* barrier ensures every thread sees the finished 'lambda' before dividing */
    #pragma omp barrier

    /* do the normalization.
     * NOTE(review): an all-zero column yields lambda[j] == 0 and a division
     * by zero here -- presumably inputs are never rank-deficient; verify. */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        vals[j+(i*J)] /= lambda[j];
      }
    }

    splatt_free(mylambda);
  } /* end omp parallel */
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/**
* @brief Compute the lower-triangular Cholesky factorization of the square
*        matrix A, in place (A->vals is overwritten with the factor).
*
* @param[in,out] A The symmetric positive-definite matrix to factor.
*/
void mat_cholesky(
    matrix_t const * const A)
{
  timer_start(&timers[TIMER_CHOLESKY]);
  /* check dimensions */
  assert(A->I == A->J);

  /* Cholesky factorization */
  splatt_blas_int N = A->I;
  val_t * const restrict neqs = A->vals;
  char uplo = 'L';
  splatt_blas_int order = N;
  splatt_blas_int lda = N;
  splatt_blas_int info;
  LAPACK_DPOTRF(&uplo, &order, neqs, &lda, &info);
  if(info) {
    /* cast: splatt_blas_int may be wider than int (e.g. ILP64 BLAS), and
     * passing it straight to %d would be undefined behavior */
    fprintf(stderr, "SPLATT: DPOTRF returned %d\n", (int) info);
  }
  timer_stop(&timers[TIMER_CHOLESKY]);
}
/**
* @brief Solve A * X = B given the Cholesky factor of A, overwriting 'rhs'
*        with the solution.
*
* @param cholesky The lower-triangular factor produced by mat_cholesky().
* @param[in,out] rhs The right-hand sides; overwritten with the solution.
*/
void mat_solve_cholesky(
    matrix_t * const cholesky,
    matrix_t * const rhs)
{
  /* Chunked AO-ADMM will call this from a parallel region. */
  if(!splatt_omp_in_parallel()) {
    timer_start(&timers[TIMER_BACKSOLVE]);
  }

  splatt_blas_int N = cholesky->I;

  /* Solve against rhs */
  char tri = 'L';
  splatt_blas_int lda = N;
  splatt_blas_int info;
  splatt_blas_int nrhs = rhs->I;
  splatt_blas_int ldb = N;
  LAPACK_DPOTRS(&tri, &N, &nrhs, cholesky->vals, &lda, rhs->vals, &ldb, &info);
  if(info) {
    /* cast: splatt_blas_int may be wider than int; %d on it would be UB */
    fprintf(stderr, "SPLATT: DPOTRS returned %d\n", (int) info);
  }

  if(!splatt_omp_in_parallel()) {
    timer_stop(&timers[TIMER_BACKSOLVE]);
  }
}
/* Return the sum of the diagonal entries of the square matrix A. */
val_t mat_trace(
    matrix_t const * const A)
{
  assert(A->I == A->J);
  idx_t const dim = A->I;
  val_t const * const restrict entries = A->vals;

  /* diagonal entries are spaced dim+1 apart in row-major storage */
  val_t sum = 0.;
  idx_t diag = 0;
  for(idx_t i=0; i < dim; ++i) {
    sum += entries[diag];
    diag += dim + 1;
  }
  return sum;
}
/* Compute ret = A^T * A for a row-major A. NOTE: syrk writes only the
 * triangle selected by 'uplo' (BLAS semantics); callers must treat ret
 * accordingly (see mat_form_gram()). */
void mat_aTa(
  matrix_t const * const A,
  matrix_t * const ret)
{
  timer_start(&timers[TIMER_ATA]);
  /* check matrix dimensions */
  assert(ret->I == ret->J);
  assert(ret->I == A->J);
  assert(ret->vals != NULL);
  assert(A->rowmajor);
  assert(ret->rowmajor);

  idx_t const I = A->I;
  idx_t const F = A->J;

  /* Row-major A reinterpreted as column-major is A^T, so trans='N'
   * makes syrk compute (A^T)(A^T)^T = A^T A, as desired. */
  char uplo = 'L';
  char trans = 'N'; /* actually do A * A' due to row-major ordering */
  splatt_blas_int N = (splatt_blas_int) F;
  splatt_blas_int K = (splatt_blas_int) I;
  splatt_blas_int lda = N;
  splatt_blas_int ldc = N;
  val_t alpha = 1.;
  val_t beta = 0.;
  SPLATT_BLAS(syrk)(
      &uplo, &trans,
      &N, &K,
      &alpha,
      A->vals, &lda,
      &beta,
      ret->vals, &ldc);

  timer_stop(&timers[TIMER_ATA]);
}
#ifdef SPLATT_USE_MPI
/* MPI variant of mat_aTa(): computes the local A^T * A, then sums the F x F
 * result across all ranks in 'comm' (in place). */
void mat_aTa_mpi(
    matrix_t const * const A,
    matrix_t * const ret,
    MPI_Comm comm)
{
  /* local matrix multiplication */
  mat_aTa(A, ret);

  /* aggregate results */
  idx_t const F = A->J;
  timer_start(&timers[TIMER_ATA]);
  timer_start(&timers[TIMER_MPI_ATA]);
  timer_start(&timers[TIMER_MPI_COMM]);
  MPI_Allreduce(MPI_IN_PLACE, ret->vals, F * F, SPLATT_MPI_VAL, MPI_SUM, comm);
  timer_stop(&timers[TIMER_MPI_COMM]);
  timer_stop(&timers[TIMER_MPI_ATA]);
  timer_stop(&timers[TIMER_ATA]);
}
#endif
/**
* @brief Compute C = A * B for row-major matrices. C must already be allocated
*        with room for at least A->I * B->J entries; its dimensions are set here.
*/
void mat_matmul(
  matrix_t const * const A,
  matrix_t const * const B,
  matrix_t * const C)
{
  timer_start(&timers[TIMER_MATMUL]);

  /* Check dimensions. The capacity check must use C's dimensions *before*
   * they are overwritten below -- the original code assigned C->I/C->J first,
   * which made the assertion compare A->I*B->J against itself (always true). */
  assert(A->J == B->I);
  assert(C->I * C->J <= A->I * B->J);

  /* set dimensions */
  C->I = A->I;
  C->J = B->J;

  /* This calls column-major BLAS by instead computing: C^T = B^T * A^T. */
  char transA = 'N';
  char transB = 'N';
  val_t * a_vals = B->vals;
  val_t * b_vals = A->vals;
  val_t * c_vals = C->vals;
  splatt_blas_int M = B->J;
  splatt_blas_int N = A->I;
  splatt_blas_int K = A->J;
  splatt_blas_int lda = M;
  splatt_blas_int ldb = K;
  splatt_blas_int ldc = M;
  val_t alpha = 1.;
  val_t beta = 0.;
  SPLATT_BLAS(gemm)(
      &transA, &transB,
      &M, &N, &K,
      &alpha,
      a_vals, &lda,
      b_vals, &ldb,
      &beta,
      c_vals, &ldc);

  timer_stop(&timers[TIMER_MATMUL]);
}
/**
* @brief Normalize the columns of A, storing the norms in 'lambda'.
*        Invalid in MPI builds (use mat_normalize_mpi() instead).
*/
void mat_normalize(
  matrix_t * const A,
  val_t * const restrict lambda)
{
#ifdef SPLATT_USE_MPI
  /* passing comm=0 will break things in MPI mode */
  fprintf(stderr, "SPLATT: mat_normalize() is invalid in MPI mode. ");
  fprintf(stderr, "Use mat_normalize_mpi() instead.\n");
  return;
#else
  /* timer is only started on the path that actually runs -- the original
   * started it before the MPI early-return and never stopped it */
  timer_start(&timers[TIMER_MATNORM]);
  p_mat_2norm(A, lambda, 0);
  timer_stop(&timers[TIMER_MATNORM]);
#endif
}
#ifdef SPLATT_USE_MPI
/* MPI-aware column normalization: norms are aggregated over 'comm' inside
 * p_mat_2norm() before the columns of A are divided. */
void mat_normalize_mpi(
  matrix_t * const A,
  val_t * const restrict lambda,
  MPI_Comm comm)
{
  timer_start(&timers[TIMER_MATNORM]);
  p_mat_2norm(A, lambda, comm);
  timer_stop(&timers[TIMER_MATNORM]);
}
#endif
/* Form the Gram matrix for 'mode': the entrywise (Hadamard) product of
 * aTa[m] over all modes m != mode. Only the row-major upper triangle
 * (j >= i) is written, matching the triangle filled by mat_aTa(). */
void mat_form_gram(
  matrix_t * * aTa,
  matrix_t * out_mat,
  idx_t nmodes,
  idx_t mode)
{
  idx_t const N = aTa[mode]->J;
  val_t * const restrict gram = out_mat->vals;

  #pragma omp parallel
  {
    /* first initialize (multiplicative identity) */
    #pragma omp for schedule(static, 1)
    for(idx_t i=0; i < N; ++i) {
      for(idx_t j=i; j < N; ++j) {
        gram[j+(i*N)] = 1.;
      }
    }

    for(idx_t m=0; m < nmodes; ++m) {
      if(m == mode) {
        continue;
      }

      /* only work with upper triangular.
       * nowait is safe: every loop here uses schedule(static, 1) over the
       * same iteration space, so each row i is handled by the same thread
       * in every pass and has a single writer. */
      val_t const * const restrict mat = aTa[m]->vals;
      #pragma omp for schedule(static, 1) nowait
      for(idx_t i=0; i < N; ++i) {
        for(idx_t j=i; j < N; ++j) {
          gram[j+(i*N)] *= mat[j+(i*N)];
        }
      }
    }
  } /* omp parallel */
}
/* Add 'scalar' to every diagonal entry of A (rank taken from A->J). */
void mat_add_diag(
  matrix_t * const A,
  val_t const scalar)
{
  val_t * const restrict data = A->vals;
  idx_t const ncols = A->J;

  /* diagonal entries sit ncols+1 apart in row-major storage */
  for(idx_t d=0; d < ncols; ++d) {
    data[d * (ncols + 1)] += scalar;
  }
}
/* Allocate an uninitialized nrows x ncols row-major matrix. */
matrix_t * mat_alloc(
  idx_t const nrows,
  idx_t const ncols)
{
  matrix_t * m = splatt_malloc(sizeof(*m));
  m->I = nrows;
  m->J = ncols;
  m->rowmajor = 1;
  m->vals = splatt_malloc(nrows * ncols * sizeof(*m->vals));
  return m;
}
/* Allocate an nrows x ncols matrix and fill it with random values. */
matrix_t * mat_rand(
  idx_t const nrows,
  idx_t const ncols)
{
  matrix_t * m = mat_alloc(nrows, ncols);
  fill_rand(m->vals, nrows * ncols);
  return m;
}
/* Allocate an nrows x ncols matrix of zeros. */
matrix_t * mat_zero(
  idx_t const nrows,
  idx_t const ncols)
{
  matrix_t * m = mat_alloc(nrows, ncols);
  val_t * const restrict v = m->vals;

  /* Parallel first-touch initialization in case the system is NUMA; this may
   * bring a small improvement. */
  #pragma omp parallel for schedule(static)
  for(idx_t r=0; r < nrows; ++r) {
    val_t * const restrict row = v + (r * ncols);
    for(idx_t c=0; c < ncols; ++c) {
      row[c] = 0.;
    }
  }
  return m;
}
/* Allocate a matrix_t header that wraps caller-owned 'data' (no copy). */
matrix_t * mat_mkptr(
  val_t * const data,
  idx_t rows,
  idx_t cols,
  int rowmajor)
{
  matrix_t * wrapper = splatt_malloc(sizeof(*wrapper));
  mat_fillptr(wrapper, data, rows, cols, rowmajor);
  return wrapper;
}
/* Populate an existing matrix_t header to view caller-owned 'data'. */
void mat_fillptr(
  matrix_t * ptr,
  val_t * const data,
  idx_t rows,
  idx_t cols,
  int rowmajor)
{
  ptr->vals = data;
  ptr->rowmajor = rowmajor;
  ptr->I = rows;
  ptr->J = cols;
}
/* Free a matrix and its values; NULL is a safe no-op. */
void mat_free(
  matrix_t * mat)
{
  if(mat != NULL) {
    splatt_free(mat->vals);
    splatt_free(mat);
  }
}
/* Return a newly-allocated row-major copy of the column-major matrix 'mat'. */
matrix_t * mat_mkrow(
  matrix_t const * const mat)
{
  assert(mat->rowmajor == 0);

  idx_t const nrows = mat->I;
  idx_t const ncols = mat->J;

  matrix_t * row = mat_alloc(nrows, ncols); /* mat_alloc sets rowmajor = 1 */
  val_t * const restrict dst = row->vals;
  val_t const * const restrict src = mat->vals;

  /* walk the source in its natural (column) order */
  for(idx_t j=0; j < ncols; ++j) {
    for(idx_t i=0; i < nrows; ++i) {
      dst[j + (i*ncols)] = src[i + (j*nrows)];
    }
  }

  return row;
}
/* Return a newly-allocated column-major copy of the row-major matrix 'mat'. */
matrix_t * mat_mkcol(
  matrix_t const * const mat)
{
  assert(mat->rowmajor == 1);

  idx_t const nrows = mat->I;
  idx_t const ncols = mat->J;

  matrix_t * col = mat_alloc(nrows, ncols);
  col->rowmajor = 0;

  val_t * const restrict dst = col->vals;
  val_t const * const restrict src = mat->vals;

  /* walk the source in its natural (row) order */
  for(idx_t i=0; i < nrows; ++i) {
    for(idx_t j=0; j < ncols; ++j) {
      dst[i + (j*nrows)] = src[j + (i*ncols)];
    }
  }

  return col;
}
/* Allocate a CSR sparse matrix with room for 'nnz' nonzeros. */
spmatrix_t * spmat_alloc(
  idx_t const nrows,
  idx_t const ncols,
  idx_t const nnz)
{
  spmatrix_t * m = splatt_malloc(sizeof(*m));
  m->I = nrows;
  m->J = ncols;
  m->nnz = nnz;
  m->rowptr = splatt_malloc((nrows + 1) * sizeof(*m->rowptr));
  m->colind = splatt_malloc(nnz * sizeof(*m->colind));
  m->vals   = splatt_malloc(nnz * sizeof(*m->vals));
  return m;
}
/**
* @brief Free a sparse matrix allocated by spmat_alloc(). NULL is a safe
*        no-op, mirroring mat_free().
*/
void spmat_free(
  spmatrix_t * mat)
{
  if(mat == NULL) {
    return;
  }
  /* release with splatt_free() to match the splatt_malloc() calls in
   * spmat_alloc() -- mixing allocators (plain free()) is unsafe if
   * splatt_malloc() is not a thin wrapper around malloc() */
  splatt_free(mat->rowptr);
  splatt_free(mat->colind);
  splatt_free(mat->vals);
  splatt_free(mat);
}
/* Frobenius norm of A: sqrt of the sum of squared entries. */
val_t mat_norm(
  matrix_t const * const A)
{
  val_t const * const restrict v = A->vals;
  idx_t const nelems = A->I * A->J;

  val_t sumsq = 0.;
  #pragma omp parallel for schedule(static) reduction(+:sumsq)
  for(idx_t e=0; e < nelems; ++e) {
    sumsq += v[e] * v[e];
  }
  return sqrt(sumsq);
}
/* Frobenius norm of (A - B); A and B must have identical dimensions. */
val_t mat_norm_diff(
  matrix_t const * const A,
  matrix_t const * const B)
{
  assert(A->I == B->I);
  assert(A->J == B->J);

  val_t const * const restrict av = A->vals;
  val_t const * const restrict bv = B->vals;
  idx_t const nelems = A->I * A->J;

  val_t sumsq = 0.;
  #pragma omp parallel for schedule(static) reduction(+:sumsq)
  for(idx_t e=0; e < nelems; ++e) {
    val_t const d = av[e] - bv[e];
    sumsq += d * d;
  }
  return sqrt(sumsq);
}
|
Trainer.h | /*
* Copyright 2016 [See AUTHORS file for list of authors]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _TRAINER_
#define _TRAINER_
#include <float.h>
#include <limits.h>
#include <stdio.h>
#include <vector>
#include "defines.h"
// Contains times / losses / etc.
// Parallel vectors: times[i] is the elapsed time at which losses[i] was
// recorded (one entry per sampled epoch).
struct TrainStatistics {
std::vector<double> times;
std::vector<double> losses;
};
typedef struct TrainStatistics TrainStatistics;
// Abstract base for training algorithms. Subclasses implement Train(), which
// optimizes 'model' over 'datapoints' with 'updater' and returns per-epoch
// timing/loss statistics.
class Trainer {
protected:
// Append one (time, loss) sample to the running statistics.
void TrackTimeLoss(double cur_time, double cur_loss, TrainStatistics *stats) {
stats->times.push_back(cur_time);
stats->losses.push_back(cur_loss);
}
// Print the elapsed partitioning time from 'timer'.
void PrintPartitionTime(Timer &timer) { printf("Partition Time(s): %f\n", timer.Elapsed()); }
// Print one epoch's elapsed time and loss.
void PrintTimeLoss(double cur_time, double cur_loss, int epoch) {
printf("Epoch: %d\tTime(s): %f\tLoss: %lf\t\n", epoch, cur_time, cur_loss);
}
// Record (and optionally print, per FLAGS_print_loss_per_epoch /
// FLAGS_interval_print) the loss at the start of an epoch.
void EpochBegin(int epoch, Timer &gradient_timer, Model *model, const std::vector<Datapoint *> &datapoints,
TrainStatistics *stats) {
double cur_time = gradient_timer.Elapsed();
double cur_loss = model->ComputeLoss(datapoints);
TrackTimeLoss(cur_time, cur_loss, stats);
if (FLAGS_print_loss_per_epoch && epoch % FLAGS_interval_print == 0) {
PrintTimeLoss(cur_time, cur_loss, epoch);
}
}
public:
Trainer() {
/*
// Some error checking.
if (FLAGS_n_threads > std::thread::hardware_concurrency()) {
std::cerr << "Trainer: Number of threads is greater than the number of physical cores." << std::endl;
// exit(0);
}
// Basic set up, like pinning to core, setting number of threads.
omp_set_num_threads(FLAGS_n_threads);
#pragma omp parallel
{ pin_to_core(omp_get_thread_num()); }
*/
}
virtual ~Trainer() {}
// Main training method.
virtual TrainStatistics Train(Model *model, const std::vector<Datapoint *> &datapoints, Updater *updater) = 0;
};
#endif
|
dtype_transfer.c | /*
* This file contains low-level loops for data type transfers.
* In particular the function PyArray_GetDTypeTransferFunction is
* implemented here.
*
* Copyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com)
* The University of British Columbia
*
* See LICENSE.txt for the license.
*/
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#include "structmember.h"
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define PY_ARRAY_UNIQUE_SYMBOL MICPY_ARRAY_API
#define NO_IMPORT_ARRAY
#include <numpy/arrayobject.h>
#include <numpy/npy_cpu.h>
#include "npy_pycompat.h"
#define _MICARRAYMODULE
#include "arrayobject.h"
#include "convert_datatype.h"
#include "creators.h"
//#include "_datetime.h"
//#include "datetime_strings.h"
#include "mpy_lowlevel_strided_loops.h"
#include "common.h"
#include "dtype_transfer.h"
#include "shape.h"
#include "lowlevel_strided_loops.h"
#define NPY_LOWLEVEL_BUFFER_BLOCKSIZE 128
/********** PRINTF DEBUG TRACING **************/
#define NPY_DT_DBG_TRACING 0
/* Tracing incref/decref can be very noisy */
#define NPY_DT_REF_DBG_TRACING 0
#if NPY_DT_REF_DBG_TRACING
#define NPY_DT_DBG_REFTRACE(msg, ref) \
printf("%-12s %20p %s%d%s\n", msg, ref, \
ref ? "(refcnt " : "", \
ref ? (int)ref->ob_refcnt : 0, \
ref ? ((ref->ob_refcnt <= 0) ? \
") <- BIG PROBLEM!!!!" : ")") : ""); \
fflush(stdout);
#else
#define NPY_DT_DBG_REFTRACE(msg, ref)
#endif
/**********************************************/
/*
* Returns a transfer function which DECREFs any references in src_type.
*
* Returns NPY_SUCCEED or NPY_FAIL.
*/
/*static int
get_decsrcref_transfer_function(int aligned,
npy_intp src_stride,
PyArray_Descr *src_dtype,
PyMicArray_StridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata,
int *out_needs_api);*/
/*
* Returns a transfer function which zeros out the dest values.
*
* Returns NPY_SUCCEED or NPY_FAIL.
*/
/*static int
get_setdstzero_transfer_function(int aligned,
npy_intp dst_stride,
PyArray_Descr *dst_dtype,
PyMicArray_StridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata,
int *out_needs_api);
*/
/*
* Returns a transfer function which sets a boolean type to ones.
*
* Returns NPY_SUCCEED or NPY_FAIL.
*/
/*NPY_NO_EXPORT int
get_bool_setdstone_transfer_function(npy_intp dst_stride,
PyMicArray_StridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata,
int *NPY_UNUSED(out_needs_api));*/
/*************************** DEST SETZERO *******************************/
/* Sets dest to zero. Auxiliary data for the memset-zero transfer functions:
 * only the destination item size is needed. */
typedef struct {
NpyAuxData base;
npy_intp dst_itemsize;   /* bytes per destination element */
} _dst_memset_zero_data;
/* Clone the memset-zero aux data; it is a flat struct, so a shallow
 * memcpy is a complete copy. Returns NULL on allocation failure. */
static NpyAuxData *_dst_memset_zero_data_clone(NpyAuxData *data)
{
    _dst_memset_zero_data *copy;

    copy = PyArray_malloc(sizeof(*copy));
    if (copy != NULL) {
        memcpy(copy, data, sizeof(*copy));
    }
    return (NpyAuxData *)copy;
}
/* Zero N strided destination items on the target device (one memset per
 * item). The source arguments are ignored. */
static void
_null_to_strided_memset_zero(void *_dst,
npy_intp dst_stride,
void *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
NpyAuxData *data, int device)
{
_dst_memset_zero_data *d = (_dst_memset_zero_data *)data;
npy_intp dst_itemsize = d->dst_itemsize;

/* the loop is offloaded to 'device'; the scalars are mapped in */
#pragma omp target device(device) map(to: N, _dst, dst_stride, dst_itemsize)
{
char *dst = (char *) _dst;
while (N--) {
memset(dst, 0, dst_itemsize);
dst += dst_stride;
}
}
}
/* Zero N contiguous destination items with a single bulk memset on the
 * target device. The source arguments (and dst_stride) are ignored. */
static void
_null_to_contig_memset_zero(void *dst,
npy_intp dst_stride,
void *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
NpyAuxData *data, int device)
{
_dst_memset_zero_data *d = (_dst_memset_zero_data *)data;
npy_intp dst_itemsize = d->dst_itemsize;

target_memset(dst, 0, N*dst_itemsize, device);
}
/* Build a transfer function that zeroes destination values of 'dst_dtype'.
 * Only reference-free dtypes are supported; object/subarray/field dtypes
 * report failure. Returns NPY_SUCCEED or NPY_FAIL. */
NPY_NO_EXPORT int
get_setdstzero_transfer_function(int aligned,
npy_intp dst_stride,
PyArray_Descr *dst_dtype,
PyMicArray_StridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata,
int *out_needs_api)
{
_dst_memset_zero_data *data;

/* If there are no references, just set the whole thing to zero */
if (!PyDataType_REFCHK(dst_dtype)) {
data = (_dst_memset_zero_data *)
PyArray_malloc(sizeof(_dst_memset_zero_data));
if (data == NULL) {
PyErr_NoMemory();
return NPY_FAIL;
}
/* aux data is a flat struct: plain free and memcpy-clone suffice */
data->base.free = (NpyAuxData_FreeFunc *)(&PyArray_free);
data->base.clone = &_dst_memset_zero_data_clone;
data->dst_itemsize = dst_dtype->elsize;

/* a contiguous destination allows one bulk memset instead of a loop */
if (dst_stride == data->dst_itemsize) {
*out_stransfer = &_null_to_contig_memset_zero;
}
else {
*out_stransfer = &_null_to_strided_memset_zero;
}
*out_transferdata = (NpyAuxData *)data;
}
/* Object arrays would require the API/decref path, which is not
 * implemented here, so report failure (and that the API is needed). */
else if (dst_dtype->type_num == NPY_OBJECT) {
if (out_needs_api) {
*out_needs_api = 1;
}
*out_stransfer = NULL;
*out_transferdata = NULL;
return NPY_FAIL;
}
/* If there are subarrays, need to wrap it */
else if (PyDataType_HASSUBARRAY(dst_dtype)) {
//TODO: implement later
*out_stransfer = NULL;
*out_transferdata = NULL;
return NPY_FAIL;
}
/* If there are fields, need to do each field */
else if (PyDataType_HASFIELDS(dst_dtype)) {
*out_stransfer = NULL;
*out_transferdata = NULL;
return NPY_FAIL;
}

return NPY_SUCCEED;
}
/* Transfer function that intentionally does nothing (used where a source
 * decref slot must be filled but no work is required). */
static void
_dec_src_ref_nop(void *NPY_UNUSED(dst),
npy_intp NPY_UNUSED(dst_stride),
void *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
npy_intp NPY_UNUSED(N),
npy_intp NPY_UNUSED(src_itemsize),
NpyAuxData *NPY_UNUSED(data), int device)
{
/* NOP */
}
/***************** WRAP ALIGNED CONTIGUOUS TRANSFER FUNCTION **************/
/* Wraps a transfer function + data in alignment code */
typedef struct {
NpyAuxData base;
int device;                              /* device owning the staging buffers */
PyMicArray_StridedUnaryOp *wrapped,      /* contig->contig op being wrapped */
*tobuffer, *frombuffer;                  /* copy/swap into and out of buffers */
NpyAuxData *wrappeddata, *todata, *fromdata;
npy_intp src_itemsize, dst_itemsize;
char *bufferin, *bufferout;              /* staging buffers (single target_alloc;
                                            bufferout aliases its tail) */
} _align_wrap_data;
/* transfer data free function: releases the nested aux data and the device
 * staging allocation (bufferout lives inside the bufferin allocation, so
 * only bufferin is freed). */
static void _align_wrap_data_free(NpyAuxData *data)
{
_align_wrap_data *d = (_align_wrap_data *)data;
NPY_AUXDATA_FREE(d->wrappeddata);
NPY_AUXDATA_FREE(d->todata);
NPY_AUXDATA_FREE(d->fromdata);
target_free(d->bufferin, d->device);
PyArray_free(data);
}
/* transfer data copy function: deep-copies the aux data, allocating a fresh
 * pair of device staging buffers and cloning the nested transfer data.
 * Returns NULL on failure (with everything allocated so far unwound). */
static NpyAuxData *_align_wrap_data_clone(NpyAuxData *data)
{
    _align_wrap_data *d = (_align_wrap_data *)data;
    _align_wrap_data *newdata;
    npy_intp basedatasize, buffersize;

    /* Round up the structure size to 16-byte boundary */
    basedatasize = (sizeof(_align_wrap_data)+15)&(-0x10);
    /* Size of the two low level staging buffers (allocated on the device) */
    buffersize = NPY_LOWLEVEL_BUFFER_BLOCKSIZE*d->src_itemsize +
                    NPY_LOWLEVEL_BUFFER_BLOCKSIZE*d->dst_itemsize;

    /* Allocate the host struct and copy all flat fields */
    newdata = (_align_wrap_data *)PyArray_malloc(basedatasize);
    if (newdata == NULL) {
        return NULL;
    }
    memcpy(newdata, data, basedatasize);

    /* The staging buffers are device memory and must not be shared with the
     * original, so allocate a fresh pair; bufferout aliases the tail. */
    newdata->bufferin = (char *) target_alloc(buffersize, newdata->device);
    if (newdata->bufferin == NULL) {
        PyArray_free(newdata);
        return NULL;
    }
    newdata->bufferout = newdata->bufferin +
                NPY_LOWLEVEL_BUFFER_BLOCKSIZE*newdata->src_itemsize;

    /* Clone the nested aux data; unwind on any failure */
    if (newdata->wrappeddata != NULL) {
        newdata->wrappeddata = NPY_AUXDATA_CLONE(d->wrappeddata);
        if (newdata->wrappeddata == NULL) {
            target_free(newdata->bufferin, newdata->device);
            PyArray_free(newdata);
            return NULL;
        }
    }
    if (newdata->todata != NULL) {
        newdata->todata = NPY_AUXDATA_CLONE(d->todata);
        if (newdata->todata == NULL) {
            NPY_AUXDATA_FREE(newdata->wrappeddata);
            target_free(newdata->bufferin, newdata->device);
            PyArray_free(newdata);
            return NULL;
        }
    }
    if (newdata->fromdata != NULL) {
        newdata->fromdata = NPY_AUXDATA_CLONE(d->fromdata);
        if (newdata->fromdata == NULL) {
            NPY_AUXDATA_FREE(newdata->wrappeddata);
            NPY_AUXDATA_FREE(newdata->todata);
            target_free(newdata->bufferin, newdata->device);
            PyArray_free(newdata);
            return NULL;
        }
    }

    return (NpyAuxData *)newdata;
}
/* Transfer N strided items by pipelining through the aligned staging buffers
 * in blocks of NPY_LOWLEVEL_BUFFER_BLOCKSIZE: src -> bufferin (tobuffer),
 * bufferin -> bufferout (wrapped, contiguous), bufferout -> dst (frombuffer).
 * NOTE(review): the 'src +=' / 'dst +=' arithmetic on void* relies on the
 * GCC void-pointer-arithmetic extension -- confirm the supported compilers. */
static void
_strided_to_strided_contig_align_wrap(void *dst, npy_intp dst_stride,
void *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
NpyAuxData *data, int device)
{
_align_wrap_data *d = (_align_wrap_data *)data;
PyMicArray_StridedUnaryOp *wrapped = d->wrapped,
*tobuffer = d->tobuffer,
*frombuffer = d->frombuffer;
npy_intp inner_src_itemsize = d->src_itemsize,
dst_itemsize = d->dst_itemsize;
NpyAuxData *wrappeddata = d->wrappeddata,
*todata = d->todata,
*fromdata = d->fromdata;
char *bufferin = d->bufferin, *bufferout = d->bufferout;

/* the staging buffers live on one specific device; refuse a mismatch */
if (d->device != device) {
return;
}

for(;;) {
if (N > NPY_LOWLEVEL_BUFFER_BLOCKSIZE) {
tobuffer(bufferin, inner_src_itemsize, src, src_stride,
NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
src_itemsize, todata, device);
wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize,
NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
inner_src_itemsize, wrappeddata, device);
frombuffer(dst, dst_stride, bufferout, dst_itemsize,
NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
dst_itemsize, fromdata, device);
N -= NPY_LOWLEVEL_BUFFER_BLOCKSIZE;
src += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_stride;
dst += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*dst_stride;
}
else {
/* final (possibly partial) block */
tobuffer(bufferin, inner_src_itemsize, src, src_stride, N,
src_itemsize, todata, device);
wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize, N,
inner_src_itemsize, wrappeddata, device);
frombuffer(dst, dst_stride, bufferout, dst_itemsize, N,
dst_itemsize, fromdata, device);
return;
}
}
}
/* Same pipeline as _strided_to_strided_contig_align_wrap(), except the
 * destination staging buffer is zeroed (target_memset) before each call to
 * the wrapped function -- for wrapped ops that require a cleared dest. */
static void
_strided_to_strided_contig_align_wrap_init_dest(void *dst, npy_intp dst_stride,
void *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
NpyAuxData *data, int device)
{
_align_wrap_data *d = (_align_wrap_data *)data;
PyMicArray_StridedUnaryOp *wrapped = d->wrapped,
*tobuffer = d->tobuffer,
*frombuffer = d->frombuffer;
npy_intp inner_src_itemsize = d->src_itemsize,
dst_itemsize = d->dst_itemsize;
NpyAuxData *wrappeddata = d->wrappeddata,
*todata = d->todata,
*fromdata = d->fromdata;
char *bufferin = d->bufferin, *bufferout = d->bufferout;

/* the staging buffers live on one specific device; refuse a mismatch */
if (d->device != device) {
return;
}

for(;;) {
if (N > NPY_LOWLEVEL_BUFFER_BLOCKSIZE) {
tobuffer(bufferin, inner_src_itemsize, src, src_stride,
NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
src_itemsize, todata, device);
target_memset(bufferout, 0, dst_itemsize*NPY_LOWLEVEL_BUFFER_BLOCKSIZE, device);
wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize,
NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
inner_src_itemsize, wrappeddata, device);
frombuffer(dst, dst_stride, bufferout, dst_itemsize,
NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
dst_itemsize, fromdata, device);
N -= NPY_LOWLEVEL_BUFFER_BLOCKSIZE;
src += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_stride;
dst += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*dst_stride;
}
else {
/* final (possibly partial) block */
tobuffer(bufferin, inner_src_itemsize, src, src_stride, N,
src_itemsize, todata, device);
target_memset(bufferout, 0, dst_itemsize*N, device);
wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize, N,
inner_src_itemsize, wrappeddata, device);
frombuffer(dst, dst_stride, bufferout, dst_itemsize, N,
dst_itemsize, fromdata, device);
return;
}
}
}
/*
* Wraps an aligned contig to contig transfer function between either
* copies or byte swaps to temporary buffers.
*
* src_itemsize/dst_itemsize - The sizes of the src and dst datatypes.
* tobuffer - copy/swap function from src to an aligned contiguous buffer.
* todata - data for tobuffer
* frombuffer - copy/swap function from an aligned contiguous buffer to dst.
* fromdata - data for frombuffer
* wrapped - contig to contig transfer function being wrapped
* wrappeddata - data for wrapped
* init_dest - 1 means to memset the dest buffer to 0 before calling wrapped.
*
* Returns NPY_SUCCEED or NPY_FAIL.
*/
NPY_NO_EXPORT int
wrap_aligned_contig_transfer_function(
                            int device,
                            npy_intp src_itemsize, npy_intp dst_itemsize,
                            PyMicArray_StridedUnaryOp *tobuffer, NpyAuxData *todata,
                            PyMicArray_StridedUnaryOp *frombuffer, NpyAuxData *fromdata,
                            PyMicArray_StridedUnaryOp *wrapped, NpyAuxData *wrappeddata,
                            int init_dest,
                            PyMicArray_StridedUnaryOp **out_stransfer,
                            NpyAuxData **out_transferdata)
{
    _align_wrap_data *data;
    npy_intp basedatasize, buffersize;

    /* Round up the structure size to 16-byte boundary */
    basedatasize = (sizeof(_align_wrap_data)+15)&(-0x10);
    /* Size of the two low level staging buffers (allocated on the device) */
    buffersize = NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_itemsize +
                    NPY_LOWLEVEL_BUFFER_BLOCKSIZE*dst_itemsize;

    /* Allocate the host-side aux struct */
    data = (_align_wrap_data *)PyArray_malloc(basedatasize);
    if (data == NULL) {
        PyErr_NoMemory();
        return NPY_FAIL;
    }
    /* One device allocation holds both buffers; bufferout aliases its tail */
    data->bufferin = (char *) target_alloc(buffersize, device);
    if (data->bufferin == NULL) {
        PyArray_free(data);
        PyErr_NoMemory();
        return NPY_FAIL;
    }
    data->base.free = &_align_wrap_data_free;
    data->base.clone = &_align_wrap_data_clone;
    data->device = device;
    data->tobuffer = tobuffer;
    data->todata = todata;
    data->frombuffer = frombuffer;
    data->fromdata = fromdata;
    data->wrapped = wrapped;
    data->wrappeddata = wrappeddata;
    data->src_itemsize = src_itemsize;
    data->dst_itemsize = dst_itemsize;
    data->bufferout = data->bufferin +
                NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_itemsize;

    /* Set the function and data */
    if (init_dest) {
        *out_stransfer = &_strided_to_strided_contig_align_wrap_init_dest;
    }
    else {
        *out_stransfer = &_strided_to_strided_contig_align_wrap;
    }
    *out_transferdata = (NpyAuxData *)data;

    return NPY_SUCCEED;
}
/*************************** DTYPE CAST FUNCTIONS *************************/
/* Get a transfer function for casting between two native-byte-order numeric
 * type numbers. Emits a ComplexWarning when the imaginary part would be
 * discarded. Returns NPY_SUCCEED or NPY_FAIL. */
static int
get_nbo_cast_numeric_transfer_function(int aligned,
npy_intp src_stride, npy_intp dst_stride,
int src_type_num, int dst_type_num,
PyMicArray_StridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata)
{
/* Emit a warning if complex imaginary is being cast away */
if (PyTypeNum_ISCOMPLEX(src_type_num) &&
!PyTypeNum_ISCOMPLEX(dst_type_num) &&
!PyTypeNum_ISBOOL(dst_type_num)) {
PyObject *cls = NULL, *obj = NULL;
int ret;

obj = PyImport_ImportModule("numpy.core");
if (obj) {
cls = PyObject_GetAttrString(obj, "ComplexWarning");
Py_DECREF(obj);
}
/* cls may still be NULL if the import/lookup failed; PyErr_WarnEx then
 * presumably falls back to its default category -- verify */
ret = PyErr_WarnEx(cls,
"Casting complex values to real discards "
"the imaginary part", 1);
Py_XDECREF(cls);
if (ret < 0) {
return NPY_FAIL;
}
}

*out_stransfer = PyMicArray_GetStridedNumericCastFn(aligned,
src_stride, dst_stride,
src_type_num, dst_type_num);
*out_transferdata = NULL;
if (*out_stransfer == NULL) {
PyErr_SetString(PyExc_ValueError,
"unexpected error in GetStridedNumericCastFn");
return NPY_FAIL;
}

return NPY_SUCCEED;
}
/*
 * Get a cast transfer function between two dtypes assumed to be in native
 * byte order. Only numeric -> numeric casts are currently supported; all
 * other combinations fail. *out_needs_wrap is set when either dtype is not
 * actually native-byte-order, so the caller must add a byte-swap wrapper.
 *
 * Returns NPY_SUCCEED or NPY_FAIL.
 */
static int
get_nbo_cast_transfer_function(int aligned,
                            npy_intp src_stride, npy_intp dst_stride,
                            PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
                            int move_references,
                            PyMicArray_StridedUnaryOp **out_stransfer,
                            NpyAuxData **out_transferdata,
                            int *out_needs_api,
                            int *out_needs_wrap)
{
    if (PyTypeNum_ISNUMBER(src_dtype->type_num) &&
                    PyTypeNum_ISNUMBER(dst_dtype->type_num)) {
        /* a wrapper is needed when either side is not native byte order */
        *out_needs_wrap = !PyArray_ISNBO(src_dtype->byteorder) ||
                          !PyArray_ISNBO(dst_dtype->byteorder);
        return get_nbo_cast_numeric_transfer_function(aligned,
                                    src_stride, dst_stride,
                                    src_dtype->type_num, dst_dtype->type_num,
                                    out_stransfer, out_transferdata);
    }

    /* unsupported dtype combination */
    *out_stransfer = NULL;
    *out_transferdata = NULL;
    return NPY_FAIL;
}
/*
 * Get a transfer function that casts src_dtype to dst_dtype, adding a
 * copy/swap-to-aligned-buffer wrapper around the core cast loop when the
 * data is not in native byte order.
 *
 * On success *out_stransfer / *out_transferdata receive the loop and its
 * auxiliary data; returns NPY_SUCCEED.  Returns NPY_FAIL (exception set
 * by a callee) otherwise, releasing any aux data created along the way.
 */
static int
get_cast_transfer_function(int device, int aligned,
                            npy_intp src_stride, npy_intp dst_stride,
                            PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
                            int move_references,
                            PyMicArray_StridedUnaryOp **out_stransfer,
                            NpyAuxData **out_transferdata,
                            int *out_needs_api)
{
    PyMicArray_StridedUnaryOp *caststransfer;
    NpyAuxData *castdata, *todata = NULL, *fromdata = NULL;
    int needs_wrap = 0;
    npy_intp src_itemsize = src_dtype->elsize,
            dst_itemsize = dst_dtype->elsize;

    /* Core NBO cast loop; also tells us whether byte-order wrapping is needed */
    if (get_nbo_cast_transfer_function(aligned,
                            src_stride, dst_stride,
                            src_dtype, dst_dtype,
                            move_references,
                            &caststransfer,
                            &castdata,
                            out_needs_api,
                            &needs_wrap) != NPY_SUCCEED) {
        return NPY_FAIL;
    }

    /*
     * If all native byte order and doesn't need alignment wrapping,
     * return the function
     */
    if (!needs_wrap) {
        *out_stransfer = caststransfer;
        *out_transferdata = castdata;

        return NPY_SUCCEED;
    }
    /* Otherwise, we have to copy and/or swap to aligned temporaries */
    else {
        PyMicArray_StridedUnaryOp *tobuffer, *frombuffer;

        /* Get the copy/swap operation from src */
        /* (buffer side is contiguous, hence the itemsize "stride") */
        PyMicArray_GetDTypeCopySwapFn(aligned,
                                src_stride, src_itemsize,
                                src_dtype,
                                &tobuffer, &todata);

        /* Get the copy/swap operation to dst */
        PyMicArray_GetDTypeCopySwapFn(aligned,
                                dst_itemsize, dst_stride,
                                dst_dtype,
                                &frombuffer, &fromdata);

        if (frombuffer == NULL || tobuffer == NULL) {
            /* Release everything acquired so far before failing */
            NPY_AUXDATA_FREE(castdata);
            NPY_AUXDATA_FREE(todata);
            NPY_AUXDATA_FREE(fromdata);
            return NPY_FAIL;
        }

        *out_stransfer = caststransfer;

        /* Wrap it all up in a new transfer function + data */
        if (wrap_aligned_contig_transfer_function(
                            device,
                            src_itemsize, dst_itemsize,
                            tobuffer, todata,
                            frombuffer, fromdata,
                            caststransfer, castdata,
                            PyDataType_FLAGCHK(dst_dtype, NPY_NEEDS_INIT),
                            out_stransfer, out_transferdata) != NPY_SUCCEED) {
            NPY_AUXDATA_FREE(castdata);
            NPY_AUXDATA_FREE(todata);
            NPY_AUXDATA_FREE(fromdata);
            return NPY_FAIL;
        }

        return NPY_SUCCEED;
    }
}
/********************* DTYPE COPY SWAP FUNCTION ***********************/
/*
 * Get a function (plus optional aux data) that copies elements of the
 * given dtype between strided arrays, byte-swapping when the descriptor
 * is not in native byte order.
 *
 * Returns NPY_SUCCEED on success, NPY_FAIL otherwise (custom dtypes and
 * unicode are unsupported, and the table lookup itself may fail).
 */
NPY_NO_EXPORT int
PyMicArray_GetDTypeCopySwapFn(int aligned,
                            npy_intp src_stride, npy_intp dst_stride,
                            PyArray_Descr *dtype,
                            PyMicArray_StridedUnaryOp **outstransfer,
                            NpyAuxData **outtransferdata)
{
    npy_intp elsize = dtype->elsize;
    PyMicArray_StridedUnaryOp *fn;

    if (dtype->type_num >= NPY_NTYPES) {
        /* Custom (user-registered) dtypes are not supported here */
        fn = NULL;
    }
    else if (elsize == 1 || PyArray_ISNBO(dtype->byteorder)) {
        /* Single bytes, or already native order: a straight copy */
        fn = PyMicArray_GetStridedCopyFn(aligned, src_stride, dst_stride,
                                         elsize);
    }
    else if (dtype->kind == 'U') {
        /* Unicode is not supported */
        fn = NULL;
    }
    else if (dtype->kind != 'c') {
        /* Non-complex, non-native order: one swap per element */
        fn = PyMicArray_GetStridedCopySwapFn(aligned, src_stride, dst_stride,
                                             elsize);
    }
    else {
        /* Complex: swap the real and imaginary halves as a pair */
        fn = PyMicArray_GetStridedCopySwapPairFn(aligned, src_stride,
                                                 dst_stride, elsize);
    }

    /* None of the cases above need auxiliary data */
    *outstransfer = fn;
    *outtransferdata = NULL;
    return (fn == NULL) ? NPY_FAIL : NPY_SUCCEED;
}
/********************* MAIN DTYPE TRANSFER FUNCTION ***********************/
/*
 * Build a strided transfer function (and auxiliary data) that copies,
 * byte-swaps and/or casts elements from src_dtype to dst_dtype, trying
 * progressively more general strategies:
 *   1. NULL src/dst special cases (decref-nop / set-dst-zero)
 *   2. native-byte-order number -> number (copy or numeric cast)
 *   3. plain copy for equivalent reference-free dtypes
 *   4. copy + swap for same-size same-kind dtypes
 *   5. full cast via get_cast_transfer_function()
 * Unsupported combinations (subarrays, fields, variable-size string
 * types) return NPY_FAIL.
 */
NPY_NO_EXPORT int
PyMicArray_GetDTypeTransferFunction(int device, int aligned,
                            npy_intp src_stride, npy_intp dst_stride,
                            PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
                            int move_references,
                            PyMicArray_StridedUnaryOp **out_stransfer,
                            NpyAuxData **out_transferdata,
                            int *out_needs_api)
{
    npy_intp src_itemsize, dst_itemsize;
    int src_type_num, dst_type_num;

#if NPY_DT_DBG_TRACING
    printf("Calculating dtype transfer from ");
    PyObject_Print((PyObject *)src_dtype, stdout, 0);
    printf(" to ");
    PyObject_Print((PyObject *)dst_dtype, stdout, 0);
    printf("\n");
#endif

    /*
     * If one of the dtypes is NULL, we give back either a src decref
     * function or a dst setzero function
     */
    if (dst_dtype == NULL) {
        if (move_references) {
            return NPY_FAIL;
        }
        else {
            /* No destination: nothing to do per element */
            *out_stransfer = &_dec_src_ref_nop;
            *out_transferdata = NULL;
            return NPY_SUCCEED;
        }
    }
    else if (src_dtype == NULL) {
        return get_setdstzero_transfer_function(aligned,
                                dst_dtype->elsize,
                                dst_dtype,
                                out_stransfer, out_transferdata,
                                out_needs_api);
    }

    src_itemsize = src_dtype->elsize;
    dst_itemsize = dst_dtype->elsize;
    src_type_num = src_dtype->type_num;
    dst_type_num = dst_dtype->type_num;

    /* Common special case - number -> number NBO cast */
    if (PyTypeNum_ISNUMBER(src_type_num) &&
                    PyTypeNum_ISNUMBER(dst_type_num) &&
                    PyArray_ISNBO(src_dtype->byteorder) &&
                    PyArray_ISNBO(dst_dtype->byteorder)) {
        if (PyArray_EquivTypenums(src_type_num, dst_type_num)) {
            /* Same numeric type: plain copy suffices */
            *out_stransfer = PyMicArray_GetStridedCopyFn(aligned,
                                        src_stride, dst_stride,
                                        src_itemsize);
            *out_transferdata = NULL;
            return (*out_stransfer == NULL) ? NPY_FAIL : NPY_SUCCEED;
        }
        else {
            return get_nbo_cast_numeric_transfer_function (aligned,
                                    src_stride, dst_stride,
                                    src_type_num, dst_type_num,
                                    out_stransfer, out_transferdata);
        }
    }

    /*
     * If there are no references and the data types are equivalent,
     * return a simple copy
     */
    if (!PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) &&
                            PyArray_EquivTypes(src_dtype, dst_dtype)) {
        /*
         * We can't pass through the aligned flag because it's not
         * appropriate. Consider a size-8 string, it will say it's
         * aligned because strings only need alignment 1, but the
         * copy function wants to know if it's alignment 8.
         *
         * TODO: Change align from a flag to a "best power of 2 alignment"
         *       which holds the strongest alignment value for all
         *       the data which will be used.
         */
        *out_stransfer = PyMicArray_GetStridedCopyFn(0,
                                        src_stride, dst_stride,
                                        src_dtype->elsize);
        *out_transferdata = NULL;
        return NPY_SUCCEED;
    }

    /* First look at the possibilities of just a copy or swap */
    if (src_itemsize == dst_itemsize && src_dtype->kind == dst_dtype->kind &&
                !PyDataType_HASFIELDS(src_dtype) &&
                !PyDataType_HASFIELDS(dst_dtype) &&
                !PyDataType_HASSUBARRAY(src_dtype) &&
                !PyDataType_HASSUBARRAY(dst_dtype) &&
                src_type_num != NPY_DATETIME && src_type_num != NPY_TIMEDELTA) {
        /* A custom data type requires that we use its copy/swap */
        if (src_type_num >= NPY_NTYPES || dst_type_num >= NPY_NTYPES) {
            /*
             * If the sizes and kinds are identical, but they're different
             * custom types, then get a cast function
             * (not supported in micpy: fail)
             */
            return NPY_FAIL;
        }

        /* The special types, which have no or subelement byte-order */
        switch (src_type_num) {
            case NPY_UNICODE:
            case NPY_VOID:
            case NPY_STRING:
            case NPY_OBJECT:
                return NPY_FAIL;
        }

        /* This is a straight copy */
        /* (byte orders match: either both native or both swapped) */
        if (src_itemsize == 1 || PyArray_ISNBO(src_dtype->byteorder) ==
                                 PyArray_ISNBO(dst_dtype->byteorder)) {
            *out_stransfer = PyMicArray_GetStridedCopyFn(aligned,
                                        src_stride, dst_stride,
                                        src_itemsize);
            *out_transferdata = NULL;
            return (*out_stransfer == NULL) ? NPY_FAIL : NPY_SUCCEED;
        }
        /* This is a straight copy + byte swap */
        else if (!PyTypeNum_ISCOMPLEX(src_type_num)) {
            *out_stransfer = PyMicArray_GetStridedCopySwapFn(aligned,
                                        src_stride, dst_stride,
                                        src_itemsize);
            *out_transferdata = NULL;
            return (*out_stransfer == NULL) ? NPY_FAIL : NPY_SUCCEED;
        }
        /* This is a straight copy + element pair byte swap */
        else {
            *out_stransfer = PyMicArray_GetStridedCopySwapPairFn(aligned,
                                        src_stride, dst_stride,
                                        src_itemsize);
            *out_transferdata = NULL;
            return (*out_stransfer == NULL) ? NPY_FAIL : NPY_SUCCEED;
        }
    }

    /* Handle subarrays (not supported) */
    if (PyDataType_HASSUBARRAY(src_dtype) ||
                    PyDataType_HASSUBARRAY(dst_dtype)) {
        return NPY_FAIL;
    }

    /* Handle fields (structured dtypes: not supported) */
    if ((PyDataType_HASFIELDS(src_dtype) || PyDataType_HASFIELDS(dst_dtype)) &&
            src_type_num != NPY_OBJECT && dst_type_num != NPY_OBJECT) {
        //TODO: figure out what field is
        return NPY_FAIL;
    }

    /* Check for different-sized strings, unicodes, or voids (unsupported) */
    if (src_type_num == dst_type_num) {
        switch (src_type_num) {
        case NPY_UNICODE:
        case NPY_STRING:
        case NPY_VOID:
            return NPY_FAIL;
        }
    }

    /* Otherwise a cast is necessary */
    return get_cast_transfer_function(device, aligned,
                    src_stride, dst_stride,
                    src_dtype, dst_dtype,
                    move_references,
                    out_stransfer, out_transferdata,
                    out_needs_api);
}
/*
 * Masked dtype transfer functions are not implemented in micpy yet.
 *
 * Always fails.  An exception is set explicitly so callers that propagate
 * NPY_FAIL raise a meaningful error instead of Python's
 * "SystemError: error return without exception set".
 */
NPY_NO_EXPORT int
PyMicArray_GetMaskedDTypeTransferFunction(int aligned,
                            npy_intp src_stride,
                            npy_intp dst_stride,
                            npy_intp mask_stride,
                            PyArray_Descr *src_dtype,
                            PyArray_Descr *dst_dtype,
                            PyArray_Descr *mask_dtype,
                            int move_references,
                            PyMicArray_MaskedStridedUnaryOp **out_stransfer,
                            NpyAuxData **out_transferdata,
                            int *out_needs_api)
{
    /* TODO: port numpy's masked transfer machinery */
    PyErr_SetString(PyExc_NotImplementedError,
            "PyMicArray_GetMaskedDTypeTransferFunction is not implemented");
    /* Defensive: never leave the out parameters dangling on failure */
    *out_stransfer = NULL;
    *out_transferdata = NULL;
    return NPY_FAIL;
}
/*
 * Casting of raw (non-PyObject) array buffers is not implemented in
 * micpy yet.
 *
 * Always fails.  An exception is set explicitly so callers that propagate
 * NPY_FAIL surface a meaningful error instead of failing silently.
 */
NPY_NO_EXPORT int
PyMicArray_CastRawArrays(npy_intp count,
                 char *src, char *dst,
                 npy_intp src_stride, npy_intp dst_stride,
                 PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
                 int move_references)
{
    /* TODO: port numpy's PyArray_CastRawArrays implementation */
    PyErr_SetString(PyExc_NotImplementedError,
            "PyMicArray_CastRawArrays is not implemented");
    return NPY_FAIL;
}
/*
* Prepares shape and strides for a simple raw array iteration.
* This sorts the strides into FORTRAN order, reverses any negative
* strides, then coalesces axes where possible. The results are
* filled in the output parameters.
*
* This is intended for simple, lightweight iteration over arrays
* where no buffering of any kind is needed, and the array may
* not be stored as a PyArrayObject.
*
* The arrays shape, out_shape, strides, and out_strides must all
* point to different data.
*
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
PyMicArray_PrepareOneRawArrayIter(int ndim, npy_intp *shape,
                            char *data, npy_intp *strides,
                            int *out_ndim, npy_intp *out_shape,
                            char **out_data, npy_intp *out_strides)
{
    npy_stride_sort_item strideperm[NPY_MAXDIMS];
    int i, j;

    /* Special case 0 and 1 dimensions */
    if (ndim == 0) {
        /* 0-d: present it as a single-element 1-d iteration */
        *out_ndim = 1;
        *out_data = data;
        out_shape[0] = 1;
        out_strides[0] = 0;
        return 0;
    }
    else if (ndim == 1) {
        npy_intp stride_entry = strides[0], shape_entry = shape[0];
        *out_ndim = 1;
        out_shape[0] = shape[0];
        /* Always make a positive stride */
        if (stride_entry >= 0) {
            *out_data = data;
            out_strides[0] = stride_entry;
        }
        else {
            /* Start from the last element so iteration runs forward */
            *out_data = data + stride_entry * (shape_entry - 1);
            out_strides[0] = -stride_entry;
        }
        return 0;
    }

    /* Sort the axes based on the destination strides */
    /* (iterate smallest stride innermost for memory-order traversal) */
    PyArray_CreateSortedStridePerm(ndim, strides, strideperm);
    for (i = 0; i < ndim; ++i) {
        int iperm = strideperm[ndim - i - 1].perm;
        out_shape[i] = shape[iperm];
        out_strides[i] = strides[iperm];
    }

    /* Reverse any negative strides */
    for (i = 0; i < ndim; ++i) {
        npy_intp stride_entry = out_strides[i], shape_entry = out_shape[i];

        if (stride_entry < 0) {
            data += stride_entry * (shape_entry - 1);
            out_strides[i] = -stride_entry;
        }
        /* Detect 0-size arrays here */
        if (shape_entry == 0) {
            *out_ndim = 1;
            *out_data = data;
            out_shape[0] = 0;
            out_strides[0] = 0;
            return 0;
        }
    }

    /* Coalesce any dimensions where possible */
    /* i tracks the last accepted (possibly merged) output axis */
    i = 0;
    for (j = 1; j < ndim; ++j) {
        if (out_shape[i] == 1) {
            /* Drop axis i */
            out_shape[i] = out_shape[j];
            out_strides[i] = out_strides[j];
        }
        else if (out_shape[j] == 1) {
            /* Drop axis j */
        }
        else if (out_strides[i] * out_shape[i] == out_strides[j]) {
            /* Coalesce axes i and j (j's elements are contiguous after i's) */
            out_shape[i] *= out_shape[j];
        }
        else {
            /* Can't coalesce, go to next i */
            ++i;
            out_shape[i] = out_shape[j];
            out_strides[i] = out_strides[j];
        }
    }
    ndim = i+1;

#if 0
    /* DEBUG */
    {
        printf("raw iter ndim %d\n", ndim);
        printf("shape: ");
        for (i = 0; i < ndim; ++i) {
            printf("%d ", (int)out_shape[i]);
        }
        printf("\n");
        printf("strides: ");
        for (i = 0; i < ndim; ++i) {
            printf("%d ", (int)out_strides[i]);
        }
        printf("\n");
    }
#endif

    *out_data = data;
    *out_ndim = ndim;
    return 0;
}
/*
* The same as PyArray_PrepareOneRawArrayIter, but for two
* operands instead of one. Any broadcasting of the two operands
* should have already been done before calling this function,
* as the ndim and shape is only specified once for both operands.
*
* Only the strides of the first operand are used to reorder
* the dimensions, no attempt to consider all the strides together
* is made, as is done in the NpyIter object.
*
* You can use this together with NPY_RAW_ITER_START and
* NPY_RAW_ITER_TWO_NEXT to handle the looping boilerplate of everything
* but the innermost loop (which is for idim == 0).
*
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
PyMicArray_PrepareTwoRawArrayIter(int ndim, npy_intp *shape,
                            char *dataA, npy_intp *stridesA,
                            char *dataB, npy_intp *stridesB,
                            int *out_ndim, npy_intp *out_shape,
                            char **out_dataA, npy_intp *out_stridesA,
                            char **out_dataB, npy_intp *out_stridesB)
{
    npy_stride_sort_item strideperm[NPY_MAXDIMS];
    int i, j;

    /* Special case 0 and 1 dimensions */
    if (ndim == 0) {
        /* 0-d: present as a single-element 1-d iteration */
        *out_ndim = 1;
        *out_dataA = dataA;
        *out_dataB = dataB;
        out_shape[0] = 1;
        out_stridesA[0] = 0;
        out_stridesB[0] = 0;
        return 0;
    }
    else if (ndim == 1) {
        npy_intp stride_entryA = stridesA[0], stride_entryB = stridesB[0];
        npy_intp shape_entry = shape[0];
        *out_ndim = 1;
        out_shape[0] = shape[0];
        /* Always make a positive stride for the first operand */
        /* (B follows A's direction so elements stay paired) */
        if (stride_entryA >= 0) {
            *out_dataA = dataA;
            *out_dataB = dataB;
            out_stridesA[0] = stride_entryA;
            out_stridesB[0] = stride_entryB;
        }
        else {
            *out_dataA = dataA + stride_entryA * (shape_entry - 1);
            *out_dataB = dataB + stride_entryB * (shape_entry - 1);
            out_stridesA[0] = -stride_entryA;
            out_stridesB[0] = -stride_entryB;
        }
        return 0;
    }

    /* Sort the axes based on the destination strides */
    /* (only operand A's strides drive the ordering) */
    PyArray_CreateSortedStridePerm(ndim, stridesA, strideperm);
    for (i = 0; i < ndim; ++i) {
        int iperm = strideperm[ndim - i - 1].perm;
        out_shape[i] = shape[iperm];
        out_stridesA[i] = stridesA[iperm];
        out_stridesB[i] = stridesB[iperm];
    }

    /* Reverse any negative strides of operand A */
    for (i = 0; i < ndim; ++i) {
        npy_intp stride_entryA = out_stridesA[i];
        npy_intp stride_entryB = out_stridesB[i];
        npy_intp shape_entry = out_shape[i];

        if (stride_entryA < 0) {
            dataA += stride_entryA * (shape_entry - 1);
            dataB += stride_entryB * (shape_entry - 1);
            out_stridesA[i] = -stride_entryA;
            out_stridesB[i] = -stride_entryB;
        }
        /* Detect 0-size arrays here */
        if (shape_entry == 0) {
            *out_ndim = 1;
            *out_dataA = dataA;
            *out_dataB = dataB;
            out_shape[0] = 0;
            out_stridesA[0] = 0;
            out_stridesB[0] = 0;
            return 0;
        }
    }

    /* Coalesce any dimensions where possible */
    /* (both operands must be mergeable for a pair of axes to coalesce) */
    i = 0;
    for (j = 1; j < ndim; ++j) {
        if (out_shape[i] == 1) {
            /* Drop axis i */
            out_shape[i] = out_shape[j];
            out_stridesA[i] = out_stridesA[j];
            out_stridesB[i] = out_stridesB[j];
        }
        else if (out_shape[j] == 1) {
            /* Drop axis j */
        }
        else if (out_stridesA[i] * out_shape[i] == out_stridesA[j] &&
                        out_stridesB[i] * out_shape[i] == out_stridesB[j]) {
            /* Coalesce axes i and j */
            out_shape[i] *= out_shape[j];
        }
        else {
            /* Can't coalesce, go to next i */
            ++i;
            out_shape[i] = out_shape[j];
            out_stridesA[i] = out_stridesA[j];
            out_stridesB[i] = out_stridesB[j];
        }
    }
    ndim = i+1;

    *out_dataA = dataA;
    *out_dataB = dataB;
    *out_ndim = ndim;
    return 0;
}
/*
* The same as PyArray_PrepareOneRawArrayIter, but for three
* operands instead of one. Any broadcasting of the three operands
* should have already been done before calling this function,
* as the ndim and shape is only specified once for all operands.
*
* Only the strides of the first operand are used to reorder
* the dimensions, no attempt to consider all the strides together
* is made, as is done in the NpyIter object.
*
* You can use this together with NPY_RAW_ITER_START and
* NPY_RAW_ITER_THREE_NEXT to handle the looping boilerplate of everything
* but the innermost loop (which is for idim == 0).
*
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
PyMicArray_PrepareThreeRawArrayIter(int ndim, npy_intp *shape,
                            char *dataA, npy_intp *stridesA,
                            char *dataB, npy_intp *stridesB,
                            char *dataC, npy_intp *stridesC,
                            int *out_ndim, npy_intp *out_shape,
                            char **out_dataA, npy_intp *out_stridesA,
                            char **out_dataB, npy_intp *out_stridesB,
                            char **out_dataC, npy_intp *out_stridesC)
{
    npy_stride_sort_item strideperm[NPY_MAXDIMS];
    int i, j;

    /* Special case 0 and 1 dimensions */
    if (ndim == 0) {
        /* 0-d: present as a single-element 1-d iteration */
        *out_ndim = 1;
        *out_dataA = dataA;
        *out_dataB = dataB;
        *out_dataC = dataC;
        out_shape[0] = 1;
        out_stridesA[0] = 0;
        out_stridesB[0] = 0;
        out_stridesC[0] = 0;
        return 0;
    }
    else if (ndim == 1) {
        npy_intp stride_entryA = stridesA[0];
        npy_intp stride_entryB = stridesB[0];
        npy_intp stride_entryC = stridesC[0];
        npy_intp shape_entry = shape[0];
        *out_ndim = 1;
        out_shape[0] = shape[0];
        /* Always make a positive stride for the first operand */
        /* (B and C follow A's direction so elements stay paired) */
        if (stride_entryA >= 0) {
            *out_dataA = dataA;
            *out_dataB = dataB;
            *out_dataC = dataC;
            out_stridesA[0] = stride_entryA;
            out_stridesB[0] = stride_entryB;
            out_stridesC[0] = stride_entryC;
        }
        else {
            *out_dataA = dataA + stride_entryA * (shape_entry - 1);
            *out_dataB = dataB + stride_entryB * (shape_entry - 1);
            *out_dataC = dataC + stride_entryC * (shape_entry - 1);
            out_stridesA[0] = -stride_entryA;
            out_stridesB[0] = -stride_entryB;
            out_stridesC[0] = -stride_entryC;
        }
        return 0;
    }

    /* Sort the axes based on the destination strides */
    /* (only operand A's strides drive the ordering) */
    PyArray_CreateSortedStridePerm(ndim, stridesA, strideperm);
    for (i = 0; i < ndim; ++i) {
        int iperm = strideperm[ndim - i - 1].perm;
        out_shape[i] = shape[iperm];
        out_stridesA[i] = stridesA[iperm];
        out_stridesB[i] = stridesB[iperm];
        out_stridesC[i] = stridesC[iperm];
    }

    /* Reverse any negative strides of operand A */
    for (i = 0; i < ndim; ++i) {
        npy_intp stride_entryA = out_stridesA[i];
        npy_intp stride_entryB = out_stridesB[i];
        npy_intp stride_entryC = out_stridesC[i];
        npy_intp shape_entry = out_shape[i];

        if (stride_entryA < 0) {
            dataA += stride_entryA * (shape_entry - 1);
            dataB += stride_entryB * (shape_entry - 1);
            dataC += stride_entryC * (shape_entry - 1);
            out_stridesA[i] = -stride_entryA;
            out_stridesB[i] = -stride_entryB;
            out_stridesC[i] = -stride_entryC;
        }
        /* Detect 0-size arrays here */
        if (shape_entry == 0) {
            *out_ndim = 1;
            *out_dataA = dataA;
            *out_dataB = dataB;
            *out_dataC = dataC;
            out_shape[0] = 0;
            out_stridesA[0] = 0;
            out_stridesB[0] = 0;
            out_stridesC[0] = 0;
            return 0;
        }
    }

    /* Coalesce any dimensions where possible */
    /* (all three operands must be mergeable for axes to coalesce) */
    i = 0;
    for (j = 1; j < ndim; ++j) {
        if (out_shape[i] == 1) {
            /* Drop axis i */
            out_shape[i] = out_shape[j];
            out_stridesA[i] = out_stridesA[j];
            out_stridesB[i] = out_stridesB[j];
            out_stridesC[i] = out_stridesC[j];
        }
        else if (out_shape[j] == 1) {
            /* Drop axis j */
        }
        else if (out_stridesA[i] * out_shape[i] == out_stridesA[j] &&
                        out_stridesB[i] * out_shape[i] == out_stridesB[j] &&
                        out_stridesC[i] * out_shape[i] == out_stridesC[j]) {
            /* Coalesce axes i and j */
            out_shape[i] *= out_shape[j];
        }
        else {
            /* Can't coalesce, go to next i */
            ++i;
            out_shape[i] = out_shape[j];
            out_stridesA[i] = out_stridesA[j];
            out_stridesB[i] = out_stridesB[j];
            out_stridesC[i] = out_stridesC[j];
        }
    }
    ndim = i+1;

    *out_dataA = dataA;
    *out_dataB = dataB;
    *out_dataC = dataC;
    *out_ndim = ndim;
    return 0;
}
|
friends2idlist.c | /*
FINDCENTER v0.01
Program to find the center of mass of friends-of-friends groups found with mfof in a gadget simulation
icc -lm -openmp -o ../bin/altix/findcenter findcenter.c libgad.o
gcc -std=c99 -lm -fopenmp -lgad -lgsl -lgslcblas -o ../bin/findcenter findcenter.c libgad.o
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <math.h>
#include <time.h>
#include "libgad.h"
#define PI 3.14159265358979323846
#define GAP2BORDER 9000 //min distance to borders to ignore periodic boundaries
#define OD_RAD 4000 //Radius for calculation of environmental density (kpc/h)
#define INCLUDE 0 //(friends + INCLUDE*kpc/h) are used to determine CM
#define NBOX 6 //Number of adjacent boxes that are used in every direction
#define GRIDSIZE 1000
#define h0 0.72
#define MIN(a, b) ((a)<(b)?(a):(b))
#define MAX(a, b) ((a)>(b)?(a):(b))
#define ABS(a) ((a) >= 0 ? (a) : -(a))
#define CMP(a,b) ((a)>(b)?(1):(-1))
#define PB(a,b) ((a)>(b)?(a-b):(a))
#define MOVE(a,b) PB(a+b/2,b)
#define MV(a,b) ((a)+(b)/2)%(b)
#define MOVEB(a) MOVE((a),boxsz)
//#define SQR(x) (x)*(x)
#define SOFTENING 5.00
#define G 6.6742e-11
#define Msun 1.989e30
#define kpc 3.08567758128e19
// #ifdef LONGIDS
// typedef unsigned long long IDtype;
// #else
// typedef unsigned int IDtype;
// #endif
/* A halo index paired with its member count, used to sort groups by size. */
struct rank {
    int ind;
    int cnt;
};

/* Print the command-line synopsis to stderr and terminate. */
void usage()
{
    fprintf(stderr,
            "Findcenter v0.01\n"
            "-f <friendsfile>\n"
            "-s <gadget snapshotfile>\n"
            "-o <outputfile>\n"
            "-v [verbose]\n"
            "-gap [min-dist to boundaries for PB]\n"
            "\n");
    exit(0);
}

/*
 * qsort comparator: order struct rank entries by descending member count.
 * Returns -1, 0 or +1 (branchless sign computation, no overflow risk).
 */
int cmp_hcnt (const void *first, const void *second)
{
    const struct rank *lhs = (const struct rank *) first;
    const struct rank *rhs = (const struct rank *) second;
    return (rhs->cnt > lhs->cnt) - (rhs->cnt < lhs->cnt);
}
/*
 * friends2idlist: read a gadget snapshot and a friends-of-friends group
 * file, then write one binary "idlist_<rank>" file per fof group (ranked
 * by descending member count) containing: member count, a reference
 * particle position, a placeholder max distance, and the member IDs.
 *
 * Fixes over the previous revision:
 *  - fscanf/printf format specifiers now match unsigned operands
 *    ("%u" for unsigned int, "%llu" + cast for IDtype) -- mismatches are UB;
 *  - fopen/fscanf results and allocations are checked;
 *  - argv values are bounds-checked and copied with snprintf;
 *  - leftover hard-coded-ID debug print removed;
 *  - per-group output opened in "wb" (binary fwrite data);
 *  - empty groups are skipped instead of reading indlist[...][0] OOB.
 */
int main (int argc, char *argv[])
{
    struct rank *halo;
    char friendsfile[256], gadgetfile[256], outfile[256];
    FILE *fp;
    struct header head;
    struct gadpart *part;
    int i,j,k,l,m,n,q, idum;
    unsigned int numpart, ndm, ngas, checknpart;
    int nhalo, minnum;
    int *iclus, *hcnt, **indlist;
    IDtype **idlist;
    float fdum;
    double ddum;
    int verbose=0, pnum=0, usegas=0;
    double boxsz;
    double GAP=GAP2BORDER;
    IDtype minID;

    /* ---- command-line parsing ---- */
    i=1;
    strcpy(outfile,"cm.txt");
    if (argc==1) usage();
    while (i<argc)
    {
        if (!strcmp(argv[i],"-f"))
        {
            if (++i >= argc) usage();
            snprintf(friendsfile, sizeof(friendsfile), "%s", argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-s"))
        {
            if (++i >= argc) usage();
            snprintf(gadgetfile, sizeof(gadgetfile), "%s", argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-o"))
        {
            if (++i >= argc) usage();
            snprintf(outfile, sizeof(outfile), "%s", argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-gap"))
        {
            if (++i >= argc) usage();
            GAP=atof(argv[i]);
            i++;
        }
        else if (!strcmp(argv[i],"-v"))
        {
            i++;
            verbose=1;
        }
        else if (!strcmp(argv[i],"-gas"))
        {
            i++;
            usegas=1;
        }
        else if (!strcmp(argv[i],"-n"))
        {
            i++;
            pnum=1;
        }
        else usage();
    }

    /* ---- read gadget snapshot ---- */
    if (!(numpart=readgadget_part(gadgetfile, &head, &part)))
    {
        extern int libgaderr;
        printf("error reading gadgetfile, %d\n", libgaderr);
        exit(1);
    }
    ndm =head.npart[1];
    ngas=head.npart[0];
    boxsz=head.boxsize;

    /* ---- read friends-of-friends file ---- */
    fp=fopen(friendsfile,"r");
    if (fp==NULL)
    {
        fprintf(stderr, "cannot open friendsfile '%s'\n", friendsfile);
        exit(1);
    }
    /* header: npart, time, nhalo, linking length, min group size */
    if (fscanf(fp,"%u %lf %d %f %d",
               &checknpart, &ddum, &nhalo, &fdum, &minnum) != 5)
    {
        fprintf(stderr, "error parsing friendsfile header\n");
        exit(1);
    }
    if (ABS(ddum-head.time)>1e-4) printf("Possible Snapshot-Friendsfile Mismatch (Time, %g != %g)!\n", ddum, head.time);

    iclus=(int *)calloc(checknpart, sizeof(int));
    hcnt = (int *) calloc(nhalo, sizeof(int));
    indlist = (int **) malloc(nhalo * sizeof(int *));
    idlist = (IDtype **) malloc(nhalo * sizeof(IDtype *));
    halo= (struct rank *) malloc (nhalo * sizeof(struct rank));
    if (iclus==NULL || hcnt==NULL || indlist==NULL || idlist==NULL || halo==NULL)
    {
        fprintf(stderr, "malloc failed (group bookkeeping)\n");
        exit(1);
    }

    /* per-particle group assignment (0 = not in any group) */
    for (i=0; i < (int)checknpart; i++)
    {
        if (fscanf(fp,"%d %d", &idum, &iclus[i]) != 2)
            {printf("Error in friendsfile!\n"); exit(1);}
        if ((iclus[i]>nhalo) || (iclus[i]<0)) {printf("Error in friendsfile!\n"); exit(1);}
        if (iclus[i]!=0) hcnt[iclus[i]-1]++;
    }
    fclose(fp);

    /* per-group member lists, then rank groups by size */
    for (i=0; i< nhalo; i++)
    {
        halo[i].ind=i;
        halo[i].cnt=hcnt[i];
        indlist[i] = (int *) malloc (hcnt[i]* sizeof(int));
        /* malloc(0) may legitimately return NULL for empty groups */
        if (indlist[i]==NULL && hcnt[i]>0) {printf("malloc failed (indlist)\n");exit(1);}
        idlist[i] = (IDtype *) calloc (hcnt[i], sizeof(IDtype));
        if (idlist[i]==NULL && hcnt[i]>0) {printf("malloc failed (idlist)\n");exit(1);}
    }
    qsort(halo, nhalo, sizeof(struct rank), cmp_hcnt);

    /* fill member index/ID lists; track the global minimum particle ID.
     * NOTE(review): members are addressed as part[i+ngas] even when -gas is
     * given -- TODO confirm intended for gas runs. */
    {
        int *nind;
        nind = (int *) calloc(nhalo, sizeof(int));
        if (nind==NULL) {printf("malloc failed (nind)\n");exit(1);}
        minID = part[0].id;
        for (i=0; i < (int)checknpart; i++)
        {
            if (iclus[i]!=0)
            {
                idlist[iclus[i]-1][nind[iclus[i]-1]] = part[i+ngas].id;
                if (part[i+ngas].id < minID) minID = part[i+ngas].id;
                indlist[iclus[i]-1][nind[iclus[i]-1]++]= i+ngas;
            }
        }
        free(nind);
    }

    if (!usegas)
    {
        if (checknpart!=ndm) printf("Possible Snapshot-Friendsfile Mismatch (particle number, ndm) %u != %u!\n", checknpart, ndm);
    }
    else
    {
        if (checknpart!=ngas) printf("Possible Snapshot-Friendsfile Mismatch (particle number, ngas) %u != %u!\n", checknpart, ngas);
    }

    if (verbose) {
#ifdef _OPENMP
#pragma omp parallel private(i)
        {
            i= omp_get_thread_num();
            if (i==0)
            {
                printf ("Number of threads: %d\n", omp_get_num_threads());
            }
        }
#endif
    }
    if (verbose) {printf("Number of fof-groups %d\n", nhalo);fflush(stdout);}

    /************************************************************************************/
    /* Main loop: one binary idlist file per group, in descending-size order */
    /************************************************************************************/
#pragma omp parallel for private(i, j, k, l, m, n, q)
    for ( i=0; i < nhalo; i++)
    {
        int tr_halo = halo[i].ind;
        if (hcnt[tr_halo] == 0) continue;   /* no members: nothing to write */
        l=indlist[tr_halo][0];
        char idlistname[128];
        snprintf(idlistname, sizeof(idlistname), "idlist_%d", i);
        FILE *ofp = fopen(idlistname, "wb");
        if (ofp == NULL) continue;
        float maxdist = 0;  /* placeholder, not computed here */
        fwrite(&hcnt[tr_halo], sizeof(int), 1, ofp);
        fwrite(&(part[l].pos[0]), sizeof(float), 3, ofp);
        fwrite(&maxdist, sizeof(float), 1, ofp);
        fwrite(&(idlist[tr_halo][0]), sizeof(IDtype), hcnt[tr_halo], ofp);
        fclose(ofp);
    }
    /* cast covers both 32- and 64-bit IDtype builds */
    printf("minID %llu\n", (unsigned long long)minID);
    return 0;
}
|
lensing.c | /** @file lensing.c Documented lensing module
*
* Simon Prunet and Julien Lesgourgues, 6.12.2010
*
* This module computes the lensed temperature and polarization
* anisotropy power spectra \f$ C_l^{X}, P(k), ... \f$'s given the
* unlensed temperature, polarization and lensing potential spectra.
*
* Follows Challinor and Lewis full-sky method, astro-ph/0502425
*
* The following functions can be called from other modules:
*
* -# lensing_init() at the beginning (but after spectra_init())
* -# lensing_cl_at_l() at any time for computing Cl_lensed at any l
* -# lensing_free() at the end
*/
#include "lensing.h"
#include <time.h>
/**
* Anisotropy power spectra \f$ C_l\f$'s for all types, modes and initial conditions.
* SO FAR: ONLY SCALAR
*
* This routine evaluates all the lensed \f$ C_l\f$'s at a given value of l by
* picking it in the pre-computed table. When relevant, it also
* sums over all initial conditions for each mode, and over all modes.
*
* This function can be called from whatever module at whatever time,
* provided that lensing_init() has been called before, and
* lensing_free() has not been called yet.
*
* @param ple Input: pointer to lensing structure
* @param l Input: multipole number
* @param cl_lensed Output: lensed \f$ C_l\f$'s for all types (TT, TE, EE, etc..)
* @return the error status
*/
int lensing_cl_at_l(
                    struct lensing * ple,
                    int l,
                    double * cl_lensed    /* array with argument cl_lensed[index_ct] (must be already allocated) */
                    ) {

  int last_index;
  int index_lt;

  /* refuse multipoles beyond the precomputed table */
  class_test(l > ple->l_lensed_max,
             ple->error_message,
             "you asked for lensed Cls at l=%d, they were computed only up to l=%d, you should increase l_max_scalars or decrease the precision parameter delta_l_max",l,ple->l_lensed_max);

  /* spline-interpolate all lt_size spectrum types at this l in one call */
  class_call(array_interpolate_spline(ple->l,
                                      ple->l_size,
                                      ple->cl_lens,
                                      ple->ddcl_lens,
                                      ple->lt_size,
                                      l,
                                      &last_index,
                                      cl_lensed,
                                      ple->lt_size,
                                      ple->error_message),
             ple->error_message,
             ple->error_message);

  /* set to zero for the types such that l<l_max */
  /* (beyond a type's own l_max the interpolated value is unreliable) */
  for (index_lt=0; index_lt<ple->lt_size; index_lt++)
    if ((int)l > ple->l_max_lt[index_lt])
      cl_lensed[index_lt]=0.;

  return _SUCCESS_;
}
/**
* This routine initializes the lensing structure (in particular,
* computes table of lensed anisotropy spectra \f$ C_l^{X} \f$)
*
* @param ppr Input: pointer to precision structure
* @param ppt Input: pointer to perturbation structure (just in case, not used in current version...)
* @param psp Input: pointer to spectra structure
* @param pnl Input: pointer to nonlinear structure
* @param ple Output: pointer to initialized lensing structure
* @return the error status
*/
int lensing_init(
struct precision * ppr,
struct perturbs * ppt,
struct spectra * psp,
struct nonlinear * pnl,
struct lensing * ple
) {
/** Summary: */
/** - Define local variables */
double * mu; /* mu[index_mu]: discretized values of mu
between -1 and 1, roots of Legendre polynomial */
double * w8; /* Corresponding Gauss-Legendre quadrature weights */
double theta,delta_theta;
double ** d00; /* dmn[index_mu][index_l] */
double ** d11;
double ** d2m2;
double ** d22 = NULL;
double ** d20 = NULL;
double ** d1m1;
double ** d31 = NULL;
double ** d40 = NULL;
double ** d3m1 = NULL;
double ** d3m3 = NULL;
double ** d4m2 = NULL;
double ** d4m4 = NULL;
double * buf_dxx; /* buffer */
double * Cgl; /* Cgl[index_mu] */
double * Cgl2; /* Cgl2[index_mu] */
double * sigma2; /* sigma[index_mu] */
double * ksi = NULL; /* ksi[index_mu] */
double * ksiX = NULL; /* ksiX[index_mu] */
double * ksip = NULL; /* ksip[index_mu] */
double * ksim = NULL; /* ksim[index_mu] */
double fac,fac1;
double X_000;
double X_p000;
double X_220;
double X_022;
double X_p022;
double X_121;
double X_132;
double X_242;
int num_mu,index_mu,icount;
int l;
double ll;
double * cl_unlensed; /* cl_unlensed[index_ct] */
double * cl_tt; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double * cl_te = NULL; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double * cl_ee = NULL; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double * cl_bb = NULL; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double * cl_pp; /* potential cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double res,resX,lens;
double resp, resm, lensp, lensm;
double * sqrt1;
double * sqrt2;
double * sqrt3;
double * sqrt4;
double * sqrt5;
double ** cl_md_ic; /* array with argument
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] */
double ** cl_md; /* array with argument
cl_md[index_md][index_ct] */
int index_md;
/* Timing */
//double debut, fin;
//double cpu_time;
/** - check that we really want to compute at least one spectrum */
if (ple->has_lensed_cls == _FALSE_) {
if (ple->lensing_verbose > 0)
printf("No lensing requested. Lensing module skipped.\n");
return _SUCCESS_;
}
else {
if (ple->lensing_verbose > 0) {
printf("Computing lensed spectra ");
if (ppr->accurate_lensing==_TRUE_)
printf("(accurate mode)\n");
else
printf("(fast mode)\n");
}
}
/** - initialize indices and allocate some of the arrays in the
lensing structure */
class_call(lensing_indices(ppr,psp,ple),
ple->error_message,
ple->error_message);
/** - put all precision variables here; will be stored later in precision structure */
/** - Last element in \f$ \mu \f$ will be for \f$ \mu=1 \f$, needed for sigma2.
The rest will be chosen as roots of a Gauss-Legendre quadrature **/
if (ppr->accurate_lensing == _TRUE_) {
num_mu=(ple->l_unlensed_max+ppr->num_mu_minus_lmax); /* Must be even ?? CHECK */
num_mu += num_mu%2; /* Force it to be even */
} else {
/* Integrate correlation function difference on [0,pi/16] */
num_mu = (ple->l_unlensed_max * 2 )/16;
}
/** - allocate array of \f$ \mu \f$ values, as well as quadrature weights */
class_alloc(mu,
num_mu*sizeof(double),
ple->error_message);
/* Reserve last element of mu for mu=1, needed for sigma2 */
mu[num_mu-1] = 1.0;
class_alloc(w8,
(num_mu-1)*sizeof(double),
ple->error_message);
if (ppr->accurate_lensing == _TRUE_) {
//debut = omp_get_wtime();
class_call(quadrature_gauss_legendre(mu,
w8,
num_mu-1,
ppr->tol_gauss_legendre,
ple->error_message),
ple->error_message,
ple->error_message);
//fin = omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in quadrature_gauss_legendre=%4.3f s\n",cpu_time);
} else { /* Crude integration on [0,pi/16]: Riemann sum on theta */
delta_theta = _PI_/16. / (double)(num_mu-1);
for (index_mu=0;index_mu<num_mu-1;index_mu++) {
theta = (index_mu+1)*delta_theta;
mu[index_mu] = cos(theta);
w8[index_mu] = sin(theta)*delta_theta; /* We integrate on mu */
}
}
/** - Compute \f$ d^l_{mm'} (\mu) \f$*/
icount = 0;
class_alloc(d00,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d11,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d1m1,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d2m2,
num_mu*sizeof(double*),
ple->error_message);
icount += 4*num_mu*(ple->l_unlensed_max+1);
if(ple->has_te==_TRUE_) {
class_alloc(d20,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d3m1,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d4m2,
num_mu*sizeof(double*),
ple->error_message);
icount += 3*num_mu*(ple->l_unlensed_max+1);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_alloc(d22,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d31,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d3m3,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d40,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d4m4,
num_mu*sizeof(double*),
ple->error_message);
icount += 5*num_mu*(ple->l_unlensed_max+1);
}
icount += 5*(ple->l_unlensed_max+1); /* for arrays sqrt1[l] to sqrt5[l] */
/** - Allocate main contiguous buffer **/
class_alloc(buf_dxx,
icount * sizeof(double),
ple->error_message);
icount = 0;
for (index_mu=0; index_mu<num_mu; index_mu++) {
d00[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]);
d11[index_mu] = &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]);
d1m1[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]);
d2m2[index_mu]= &(buf_dxx[icount+(index_mu+3*num_mu) * (ple->l_unlensed_max+1)]);
}
icount += 4*num_mu*(ple->l_unlensed_max+1);
if (ple->has_te==_TRUE_) {
for (index_mu=0; index_mu<num_mu; index_mu++) {
d20[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]);
d3m1[index_mu]= &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]);
d4m2[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]);
}
icount += 3*num_mu*(ple->l_unlensed_max+1);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
for (index_mu=0; index_mu<num_mu; index_mu++) {
d22[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]);
d31[index_mu] = &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]);
d3m3[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]);
d40[index_mu] = &(buf_dxx[icount+(index_mu+3*num_mu) * (ple->l_unlensed_max+1)]);
d4m4[index_mu]= &(buf_dxx[icount+(index_mu+4*num_mu) * (ple->l_unlensed_max+1)]);
}
icount += 5*num_mu*(ple->l_unlensed_max+1);
}
sqrt1 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
sqrt2 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
sqrt3 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
sqrt4 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
sqrt5 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
//debut = omp_get_wtime();
class_call(lensing_d00(mu,num_mu,ple->l_unlensed_max,d00),
ple->error_message,
ple->error_message);
class_call(lensing_d11(mu,num_mu,ple->l_unlensed_max,d11),
ple->error_message,
ple->error_message);
class_call(lensing_d1m1(mu,num_mu,ple->l_unlensed_max,d1m1),
ple->error_message,
ple->error_message);
class_call(lensing_d2m2(mu,num_mu,ple->l_unlensed_max,d2m2),
ple->error_message,
ple->error_message);
//fin = omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in lensing_dxx=%4.3f s\n",cpu_time);
if (ple->has_te==_TRUE_) {
class_call(lensing_d20(mu,num_mu,ple->l_unlensed_max,d20),
ple->error_message,
ple->error_message);
class_call(lensing_d3m1(mu,num_mu,ple->l_unlensed_max,d3m1),
ple->error_message,
ple->error_message);
class_call(lensing_d4m2(mu,num_mu,ple->l_unlensed_max,d4m2),
ple->error_message,
ple->error_message);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_call(lensing_d22(mu,num_mu,ple->l_unlensed_max,d22),
ple->error_message,
ple->error_message);
class_call(lensing_d31(mu,num_mu,ple->l_unlensed_max,d31),
ple->error_message,
ple->error_message);
class_call(lensing_d3m3(mu,num_mu,ple->l_unlensed_max,d3m3),
ple->error_message,
ple->error_message);
class_call(lensing_d40(mu,num_mu,ple->l_unlensed_max,d40),
ple->error_message,
ple->error_message);
class_call(lensing_d4m4(mu,num_mu,ple->l_unlensed_max,d4m4),
ple->error_message,
ple->error_message);
}
/** - compute \f$ Cgl(\mu)\f$, \f$ Cgl2(\mu) \f$ and sigma2(\f$\mu\f$) */
class_alloc(Cgl,
num_mu*sizeof(double),
ple->error_message);
class_alloc(Cgl2,
num_mu*sizeof(double),
ple->error_message);
class_alloc(sigma2,
(num_mu-1)*sizeof(double), /* Zero separation is omitted */
ple->error_message);
class_alloc(cl_unlensed,
psp->ct_size*sizeof(double),
ple->error_message);
/** - Locally store unlensed temperature \f$ cl_{tt}\f$ and potential \f$ cl_{pp}\f$ spectra **/
class_alloc(cl_tt,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
if (ple->has_te==_TRUE_) {
class_alloc(cl_te,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_alloc(cl_ee,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
class_alloc(cl_bb,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
}
class_alloc(cl_pp,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
class_alloc(cl_md_ic,
psp->md_size*sizeof(double *),
ple->error_message);
class_alloc(cl_md,
psp->md_size*sizeof(double *),
ple->error_message);
for (index_md = 0; index_md < psp->md_size; index_md++) {
if (psp->md_size > 1)
class_alloc(cl_md[index_md],
psp->ct_size*sizeof(double),
ple->error_message);
if (psp->ic_size[index_md] > 1)
class_alloc(cl_md_ic[index_md],
psp->ic_ic_size[index_md]*psp->ct_size*sizeof(double),
ple->error_message);
}
for (l=2; l<=ple->l_unlensed_max; l++) {
class_call(spectra_cl_at_l(psp,l,cl_unlensed,cl_md,cl_md_ic),
psp->error_message,
ple->error_message);
cl_tt[l] = cl_unlensed[ple->index_lt_tt];
cl_pp[l] = cl_unlensed[ple->index_lt_pp];
if (ple->has_te==_TRUE_) {
cl_te[l] = cl_unlensed[ple->index_lt_te];
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
cl_ee[l] = cl_unlensed[ple->index_lt_ee];
cl_bb[l] = cl_unlensed[ple->index_lt_bb];
}
}
for (index_md = 0; index_md < psp->md_size; index_md++) {
if (psp->md_size > 1)
free(cl_md[index_md]);
if (psp->ic_size[index_md] > 1)
free(cl_md_ic[index_md]);
}
free(cl_md_ic);
free(cl_md);
/** - Compute sigma2\f$(\mu)\f$ and Cgl2(\f$\mu\f$) **/
//debut = omp_get_wtime();
#pragma omp parallel for \
private (index_mu,l) \
schedule (static)
for (index_mu=0; index_mu<num_mu; index_mu++) {
Cgl[index_mu]=0;
Cgl2[index_mu]=0;
for (l=2; l<=ple->l_unlensed_max; l++) {
Cgl[index_mu] += (2.*l+1.)*l*(l+1.)*
cl_pp[l]*d11[index_mu][l];
Cgl2[index_mu] += (2.*l+1.)*l*(l+1.)*
cl_pp[l]*d1m1[index_mu][l];
}
Cgl[index_mu] /= 4.*_PI_;
Cgl2[index_mu] /= 4.*_PI_;
}
for (index_mu=0; index_mu<num_mu-1; index_mu++) {
/* Cgl(1.0) - Cgl(mu) */
sigma2[index_mu] = Cgl[num_mu-1] - Cgl[index_mu];
}
//fin = omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in Cgl,Cgl2,sigma2=%4.3f s\n",cpu_time);
/** - compute ksi, ksi+, ksi-, ksiX */
/** - --> ksi is for TT **/
if (ple->has_tt==_TRUE_) {
class_calloc(ksi,
(num_mu-1),
sizeof(double),
ple->error_message);
}
/** - --> ksiX is for TE **/
if (ple->has_te==_TRUE_) {
class_calloc(ksiX,
(num_mu-1),
sizeof(double),
ple->error_message);
}
/** - --> ksip, ksim for EE, BB **/
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_calloc(ksip,
(num_mu-1),
sizeof(double),
ple->error_message);
class_calloc(ksim,
(num_mu-1),
sizeof(double),
ple->error_message);
}
for (l=2;l<=ple->l_unlensed_max;l++) {
ll = (double)l;
sqrt1[l]=sqrt((ll+2)*(ll+1)*ll*(ll-1));
sqrt2[l]=sqrt((ll+2)*(ll-1));
sqrt3[l]=sqrt((ll+3)*(ll-2));
sqrt4[l]=sqrt((ll+4)*(ll+3)*(ll-2.)*(ll-3));
sqrt5[l]=sqrt(ll*(ll+1));
}
//debut = omp_get_wtime();
#pragma omp parallel for \
private (index_mu,l,ll,res,resX,resp,resm,lens,lensp,lensm, \
fac,fac1,X_000,X_p000,X_220,X_022,X_p022,X_121,X_132,X_242) \
schedule (static)
for (index_mu=0;index_mu<num_mu-1;index_mu++) {
for (l=2;l<=ple->l_unlensed_max;l++) {
ll = (double)l;
fac = ll*(ll+1)/4.;
fac1 = (2*ll+1)/(4.*_PI_);
/* In the following we will keep terms of the form (sigma2)^k*(Cgl2)^m
with k+m <= 2 */
X_000 = exp(-fac*sigma2[index_mu]);
X_p000 = -fac*X_000;
/* X_220 = 0.25*sqrt1[l] * exp(-(fac-0.5)*sigma2[index_mu]); */
X_220 = 0.25*sqrt1[l] * X_000; /* Order 0 */
/* next 5 lines useless, but avoid compiler warning 'may be used uninitialized' */
X_242=0.;
X_132=0.;
X_121=0.;
X_p022=0.;
X_022=0.;
if (ple->has_te==_TRUE_ || ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
/* X_022 = exp(-(fac-1.)*sigma2[index_mu]); */
X_022 = X_000 * (1+sigma2[index_mu]*(1+0.5*sigma2[index_mu])); /* Order 2 */
X_p022 = -(fac-1.)*X_022; /* Old versions were missing the
minus sign in this line, which introduced a very small error
on the high-l C_l^TE lensed spectrum [credits for bug fix:
Selim Hotinli] */
/* X_242 = 0.25*sqrt4[l] * exp(-(fac-5./2.)*sigma2[index_mu]); */
X_242 = 0.25*sqrt4[l] * X_000; /* Order 0 */
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
/* X_121 = - 0.5*sqrt2[l] * exp(-(fac-2./3.)*sigma2[index_mu]);
X_132 = - 0.5*sqrt3[l] * exp(-(fac-5./3.)*sigma2[index_mu]); */
X_121 = -0.5*sqrt2[l] * X_000 * (1+2./3.*sigma2[index_mu]); /* Order 1 */
X_132 = -0.5*sqrt3[l] * X_000 * (1+5./3.*sigma2[index_mu]); /* Order 1 */
}
}
if (ple->has_tt==_TRUE_) {
res = fac1*cl_tt[l];
lens = (X_000*X_000*d00[index_mu][l] +
X_p000*X_p000*d1m1[index_mu][l]
*Cgl2[index_mu]*8./(ll*(ll+1)) +
(X_p000*X_p000*d00[index_mu][l] +
X_220*X_220*d2m2[index_mu][l])
*Cgl2[index_mu]*Cgl2[index_mu]);
if (ppr->accurate_lensing == _FALSE_) {
/* Remove unlensed correlation function */
lens -= d00[index_mu][l];
}
res *= lens;
ksi[index_mu] += res;
}
if (ple->has_te==_TRUE_) {
resX = fac1*cl_te[l];
lens = ( X_022*X_000*d20[index_mu][l] +
Cgl2[index_mu]*2.*X_p000/sqrt5[l] *
(X_121*d11[index_mu][l] + X_132*d3m1[index_mu][l]) +
0.5 * Cgl2[index_mu] * Cgl2[index_mu] *
( ( 2.*X_p022*X_p000+X_220*X_220 ) *
d20[index_mu][l] + X_220*X_242*d4m2[index_mu][l] ) );
if (ppr->accurate_lensing == _FALSE_) {
lens -= d20[index_mu][l];
}
resX *= lens;
ksiX[index_mu] += resX;
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
resp = fac1*(cl_ee[l]+cl_bb[l]);
resm = fac1*(cl_ee[l]-cl_bb[l]);
lensp = ( X_022*X_022*d22[index_mu][l] +
2.*Cgl2[index_mu]*X_132*X_121*d31[index_mu][l] +
Cgl2[index_mu]*Cgl2[index_mu] *
( X_p022*X_p022*d22[index_mu][l] +
X_242*X_220*d40[index_mu][l] ) );
lensm = ( X_022*X_022*d2m2[index_mu][l] +
Cgl2[index_mu] *
( X_121*X_121*d1m1[index_mu][l] +
X_132*X_132*d3m3[index_mu][l] ) +
0.5 * Cgl2[index_mu] * Cgl2[index_mu] *
( 2.*X_p022*X_p022*d2m2[index_mu][l] +
X_220*X_220*d00[index_mu][l] +
X_242*X_242*d4m4[index_mu][l] ) );
if (ppr->accurate_lensing == _FALSE_) {
lensp -= d22[index_mu][l];
lensm -= d2m2[index_mu][l];
}
resp *= lensp;
resm *= lensm;
ksip[index_mu] += resp;
ksim[index_mu] += resm;
}
}
}
//fin = omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in ksi=%4.3f s\n",cpu_time);
/** - compute lensed \f$ C_l\f$'s by integration */
//debut = omp_get_wtime();
if (ple->has_tt==_TRUE_) {
class_call(lensing_lensed_cl_tt(ksi,d00,w8,num_mu-1,ple),
ple->error_message,
ple->error_message);
if (ppr->accurate_lensing == _FALSE_) {
class_call(lensing_addback_cl_tt(ple,cl_tt),
ple->error_message,
ple->error_message);
}
}
if (ple->has_te==_TRUE_) {
class_call(lensing_lensed_cl_te(ksiX,d20,w8,num_mu-1,ple),
ple->error_message,
ple->error_message);
if (ppr->accurate_lensing == _FALSE_) {
class_call(lensing_addback_cl_te(ple,cl_te),
ple->error_message,
ple->error_message);
}
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_call(lensing_lensed_cl_ee_bb(ksip,ksim,d22,d2m2,w8,num_mu-1,ple),
ple->error_message,
ple->error_message);
if (ppr->accurate_lensing == _FALSE_) {
class_call(lensing_addback_cl_ee_bb(ple,cl_ee,cl_bb),
ple->error_message,
ple->error_message);
}
}
//fin=omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in final lensing computation=%4.3f s\n",cpu_time);
/** - spline computed \f$ C_l\f$'s in view of interpolation */
class_call(array_spline_table_lines(ple->l,
ple->l_size,
ple->cl_lens,
ple->lt_size,
ple->ddcl_lens,
_SPLINE_EST_DERIV_,
ple->error_message),
ple->error_message,
ple->error_message);
/** - Free lots of stuff **/
free(buf_dxx);
free(d00);
free(d11);
free(d1m1);
free(d2m2);
if (ple->has_te==_TRUE_) {
free(d20);
free(d3m1);
free(d4m2);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
free(d22);
free(d31);
free(d3m3);
free(d40);
free(d4m4);
}
if (ple->has_tt==_TRUE_)
free(ksi);
if (ple->has_te==_TRUE_)
free(ksiX);
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
free(ksip);
free(ksim);
}
free(Cgl);
free(Cgl2);
free(sigma2);
free(mu);
free(w8);
free(cl_unlensed);
free(cl_tt);
if (ple->has_te==_TRUE_)
free(cl_te);
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
free(cl_ee);
free(cl_bb);
}
free(cl_pp);
/** - Exit **/
return _SUCCESS_;
}
/**
* This routine frees all the memory space allocated by lensing_init().
*
* To be called at the end of each run, only when no further calls to
* lensing_cl_at_l() are needed.
*
* @param ple Input: pointer to lensing structure (which fields must be freed)
* @return the error status
*/
int lensing_free(
struct lensing * ple
) {
if (ple->has_lensed_cls == _TRUE_) {
free(ple->l);
free(ple->cl_lens);
free(ple->ddcl_lens);
free(ple->l_max_lt);
}
return _SUCCESS_;
}
/**
* This routine defines indices and allocates tables in the lensing structure
*
* @param ppr Input: pointer to precision structure
* @param psp Input: pointer to spectra structure
* @param ple Input/output: pointer to lensing structure
* @return the error status
*/
int lensing_indices(
struct precision * ppr,
struct spectra * psp,
struct lensing * ple
){

int index_l;
/* scratch arrays required by the spectra_cl_at_l() interface;
   allocated, used once to fill ple->cl_lens, then freed below */
double ** cl_md_ic; /* array with argument
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] */
double ** cl_md; /* array with argument
cl_md[index_md][index_ct] */
int index_md;
int index_lt;

/* indices of all Cl types (lensed and unlensed): copy each "has_xx"
   flag and the corresponding spectra-module index into the lensing
   structure, so that lensed and unlensed Cl's share the same layout */
if (psp->has_tt == _TRUE_) {
ple->has_tt = _TRUE_;
ple->index_lt_tt=psp->index_ct_tt;
}
else {
ple->has_tt = _FALSE_;
}
if (psp->has_ee == _TRUE_) {
ple->has_ee = _TRUE_;
ple->index_lt_ee=psp->index_ct_ee;
}
else {
ple->has_ee = _FALSE_;
}
if (psp->has_te == _TRUE_) {
ple->has_te = _TRUE_;
ple->index_lt_te=psp->index_ct_te;
}
else {
ple->has_te = _FALSE_;
}
if (psp->has_bb == _TRUE_) {
ple->has_bb = _TRUE_;
ple->index_lt_bb=psp->index_ct_bb;
}
else {
ple->has_bb = _FALSE_;
}
if (psp->has_pp == _TRUE_) {
ple->has_pp = _TRUE_;
ple->index_lt_pp=psp->index_ct_pp;
}
else {
ple->has_pp = _FALSE_;
}
if (psp->has_tp == _TRUE_) {
ple->has_tp = _TRUE_;
ple->index_lt_tp=psp->index_ct_tp;
}
else {
ple->has_tp = _FALSE_;
}
if (psp->has_dd == _TRUE_) {
ple->has_dd = _TRUE_;
ple->index_lt_dd=psp->index_ct_dd;
}
else {
ple->has_dd = _FALSE_;
}
if (psp->has_td == _TRUE_) {
ple->has_td = _TRUE_;
ple->index_lt_td=psp->index_ct_td;
}
else {
ple->has_td = _FALSE_;
}
if (psp->has_ll == _TRUE_) {
ple->has_ll = _TRUE_;
ple->index_lt_ll=psp->index_ct_ll;
}
else {
ple->has_ll = _FALSE_;
}
if (psp->has_tl == _TRUE_) {
ple->has_tl = _TRUE_;
ple->index_lt_tl=psp->index_ct_tl;
}
else {
ple->has_tl = _FALSE_;
}
/* lensed spectra use exactly the same Cl-type layout as unlensed ones */
ple->lt_size = psp->ct_size;
/* number of multipoles: lensed Cl's are reliable only up to
   l_unlensed_max - delta_l_max, since the correlation-function method
   needs extra multipoles beyond the output range for accuracy */
ple->l_unlensed_max = psp->l_max_tot;
ple->l_lensed_max = ple->l_unlensed_max - ppr->delta_l_max;
/* empty-body scan: advance index_l to the first sampled multipole in
   psp->l[] that exceeds l_lensed_max (or to l_size_max if none does) */
for (index_l=0; (index_l < psp->l_size_max) && (psp->l[index_l] <= ple->l_lensed_max); index_l++);
if (index_l < psp->l_size_max) index_l++; /* one more point in order to be able to interpolate till ple->l_lensed_max */
ple->l_size = index_l+1;
/* note: ple->l stores multipoles as doubles (they are cast back to int
   where used as array indices) */
class_alloc(ple->l,ple->l_size*sizeof(double),ple->error_message);
for (index_l=0; index_l < ple->l_size; index_l++) {
ple->l[index_l] = psp->l[index_l];
}
/* allocate table where results will be stored */
class_alloc(ple->cl_lens,
ple->l_size*ple->lt_size*sizeof(double),
ple->error_message);
class_alloc(ple->ddcl_lens,
ple->l_size*ple->lt_size*sizeof(double),
ple->error_message);
/* fill with unlensed cls: spectra_cl_at_l() needs the per-mode and
   per-initial-condition scratch arrays even if we only keep the total */
class_alloc(cl_md_ic,
psp->md_size*sizeof(double *),
ple->error_message);
class_alloc(cl_md,
psp->md_size*sizeof(double *),
ple->error_message);
for (index_md = 0; index_md < psp->md_size; index_md++) {
/* inner arrays only needed when several modes / initial conditions exist */
if (psp->md_size > 1)
class_alloc(cl_md[index_md],
psp->ct_size*sizeof(double),
ple->error_message);
if (psp->ic_size[index_md] > 1)
class_alloc(cl_md_ic[index_md],
psp->ic_ic_size[index_md]*psp->ct_size*sizeof(double),
ple->error_message);
}
/* initialize cl_lens with the unlensed spectra; the lensing computation
   later overwrites the lensed types in place */
for (index_l=0; index_l<ple->l_size; index_l++) {
class_call(spectra_cl_at_l(psp,ple->l[index_l],&(ple->cl_lens[index_l*ple->lt_size]),cl_md,cl_md_ic),
psp->error_message,
ple->error_message);
}
for (index_md = 0; index_md < psp->md_size; index_md++) {
if (psp->md_size > 1)
free(cl_md[index_md]);
if (psp->ic_size[index_md] > 1)
free(cl_md_ic[index_md]);
}
free(cl_md_ic);
free(cl_md);
/* we want to output Cl_lensed up to the same l_max as Cl_unlensed
(even if a number delta_l_max of extra values of l have been used
internally for more accurate results). Notable exception to the
above rule: ClBB_lensed(scalars) must be outputed at least up to the same l_max as
ClEE_unlensed(scalars) (since ClBB_unlensed is null for scalars)
*/
/* note: l_max_lt is stored as double (matches the MAX() comparisons below
   and the free() in lensing_free) */
class_alloc(ple->l_max_lt,ple->lt_size*sizeof(double),ple->error_message);
for (index_lt = 0; index_lt < ple->lt_size; index_lt++) {
ple->l_max_lt[index_lt]=0.;
for (index_md = 0; index_md < psp->md_size; index_md++) {
ple->l_max_lt[index_lt]=MAX(ple->l_max_lt[index_lt],psp->l_max_ct[index_md][index_lt]);
/* BB exception: lensing converts EE power into BB, so BB must be
   output at least up to the EE l_max */
if ((ple->has_bb == _TRUE_) && (ple->has_ee == _TRUE_) && (index_lt == ple->index_lt_bb)) {
ple->l_max_lt[index_lt]=MAX(ple->l_max_lt[index_lt],psp->l_max_ct[index_md][ple->index_lt_ee]);
}
}
}
return _SUCCESS_;
}
/**
* This routine computes the lensed power spectra by Gaussian quadrature
*
* @param ksi Input: Lensed correlation function (ksi[index_mu])
* @param d00 Input: Legendre polynomials (\f$ d^l_{00}\f$[l][index_mu])
* @param w8 Input: Legendre quadrature weights (w8[index_mu])
* @param nmu Input: Number of quadrature points (0<=index_mu<=nmu)
* @param ple Input/output: Pointer to the lensing structure
* @return the error status
*/
int lensing_lensed_cl_tt(
                         double *ksi,
                         double **d00,
                         double *w8,
                         int nmu,
                         struct lensing * ple
                         ) {

  int index_l;

  /** Integration by Gauss-Legendre quadrature:
      C_l^TT = 2 pi * sum_mu w8(mu) ksi(mu) d00_l(mu) **/

#pragma omp parallel for schedule (static)
  for (index_l=0; index_l<ple->l_size; index_l++) {
    int l = (int)ple->l[index_l]; /* multipole of this sampling point */
    double sum = 0.;              /* quadrature accumulator */
    int index_mu;
    for (index_mu=0; index_mu<nmu; index_mu++) {
      sum += ksi[index_mu]*d00[index_mu][l]*w8[index_mu]; /* loop could be optimized */
    }
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_tt] = sum*2.0*_PI_;
  }

  return _SUCCESS_;
}
/**
* This routine adds back the unlensed \f$ cl_{tt}\f$ power spectrum
* Used in case of fast (and BB inaccurate) integration of
* correlation functions.
*
* @param ple Input/output: Pointer to the lensing structure
* @param cl_tt Input: Array of unlensed power spectrum
* @return the error status
*/
int lensing_addback_cl_tt(
                          struct lensing * ple,
                          double *cl_tt) {

  int index_l;

  /* In fast mode only the lensing correction was integrated; add the
     unlensed TT spectrum back at each sampled multipole. */
  for (index_l=0; index_l < ple->l_size; index_l++) {
    int l = (int)ple->l[index_l];
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_tt] += cl_tt[l];
  }

  return _SUCCESS_;
}
/**
* This routine computes the lensed power spectra by Gaussian quadrature
*
* @param ksiX Input: Lensed correlation function (ksiX[index_mu])
* @param d20 Input: Wigner d-function (\f$ d^l_{20}\f$[l][index_mu])
* @param w8 Input: Legendre quadrature weights (w8[index_mu])
* @param nmu Input: Number of quadrature points (0<=index_mu<=nmu)
* @param ple Input/output: Pointer to the lensing structure
* @return the error status
*/
int lensing_lensed_cl_te(
                         double *ksiX,
                         double **d20,
                         double *w8,
                         int nmu,
                         struct lensing * ple
                         ) {

  int index_l;

  /** Integration by Gauss-Legendre quadrature:
      C_l^TE = 2 pi * sum_mu w8(mu) ksiX(mu) d20_l(mu) **/

#pragma omp parallel for schedule (static)
  for (index_l=0; index_l < ple->l_size; index_l++) {
    int l = (int)ple->l[index_l]; /* multipole of this sampling point */
    double sum = 0.;              /* quadrature accumulator */
    int index_mu;
    for (index_mu=0; index_mu<nmu; index_mu++) {
      sum += ksiX[index_mu]*d20[index_mu][l]*w8[index_mu]; /* loop could be optimized */
    }
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_te] = sum*2.0*_PI_;
  }

  return _SUCCESS_;
}
/**
* This routine adds back the unlensed \f$ cl_{te}\f$ power spectrum
* Used in case of fast (and BB inaccurate) integration of
* correlation functions.
*
* @param ple Input/output: Pointer to the lensing structure
* @param cl_te Input: Array of unlensed power spectrum
* @return the error status
*/
int lensing_addback_cl_te(
                          struct lensing * ple,
                          double *cl_te) {

  int index_l;

  /* In fast mode only the lensing correction was integrated; add the
     unlensed TE spectrum back at each sampled multipole. */
  for (index_l=0; index_l < ple->l_size; index_l++) {
    int l = (int)ple->l[index_l];
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_te] += cl_te[l];
  }

  return _SUCCESS_;
}
/**
* This routine computes the lensed power spectra by Gaussian quadrature
*
* @param ksip Input: Lensed correlation function (ksi+[index_mu])
* @param ksim Input: Lensed correlation function (ksi-[index_mu])
* @param d22 Input: Wigner d-function (\f$ d^l_{22}\f$[l][index_mu])
* @param d2m2 Input: Wigner d-function (\f$ d^l_{2-2}\f$[l][index_mu])
* @param w8 Input: Legendre quadrature weights (w8[index_mu])
* @param nmu Input: Number of quadrature points (0<=index_mu<=nmu)
* @param ple Input/output: Pointer to the lensing structure
* @return the error status
*/
int lensing_lensed_cl_ee_bb(
                            double *ksip,
                            double *ksim,
                            double **d22,
                            double **d2m2,
                            double *w8,
                            int nmu,
                            struct lensing * ple
                            ) {

  int index_l;

  /** Integration by Gauss-Legendre quadrature; EE and BB are the
      half-sum and half-difference of the ksi+ and ksi- transforms. **/

#pragma omp parallel for schedule (static)
  for (index_l=0; index_l < ple->l_size; index_l++) {
    int l = (int)ple->l[index_l]; /* multipole of this sampling point */
    double sum_p = 0.;            /* quadrature accumulator for ksi+ */
    double sum_m = 0.;            /* quadrature accumulator for ksi- */
    int index_mu;
    for (index_mu=0; index_mu<nmu; index_mu++) {
      sum_p += ksip[index_mu]*d22[index_mu][l]*w8[index_mu];  /* loop could be optimized */
      sum_m += ksim[index_mu]*d2m2[index_mu][l]*w8[index_mu]; /* loop could be optimized */
    }
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_ee] = (sum_p+sum_m)*_PI_;
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_bb] = (sum_p-sum_m)*_PI_;
  }

  return _SUCCESS_;
}
/**
* This routine adds back the unlensed \f$ cl_{ee}\f$, \f$ cl_{bb}\f$ power spectra
* Used in case of fast (and BB inaccurate) integration of
* correlation functions.
*
* @param ple Input/output: Pointer to the lensing structure
* @param cl_ee Input: Array of unlensed power spectrum
* @param cl_bb Input: Array of unlensed power spectrum
* @return the error status
*/
int lensing_addback_cl_ee_bb(
                             struct lensing * ple,
                             double * cl_ee,
                             double * cl_bb) {

  int index_l;

  /* In fast mode only the lensing correction was integrated; add the
     unlensed EE and BB spectra back at each sampled multipole. */
  for (index_l=0; index_l < ple->l_size; index_l++) {
    int l = (int)ple->l[index_l];
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_ee] += cl_ee[l];
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_bb] += cl_bb[l];
  }

  return _SUCCESS_;
}
/**
* This routine computes the d00 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d00 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d00(
                double * mu,
                int num_mu,
                int lmax,
                double ** d00
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3;
  ErrorMsg erreur;

  /* Precompute the l-dependent recurrence coefficients once, instead of
     recomputing them inside the loop over mu values. */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  for (l=1; l<lmax; l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(2*ll+1)/(ll+1);
    fac2[l] = sqrt((2*ll+3)/(2*ll-1))*ll/(ll+1);
    fac3[l] = sqrt(2./(2*ll+3));
  }

  /* The recurrence is carried out on sqrt((2l+1)/2)*d00 for numerical
     stability; each stored value is rescaled back by fac3[l].
     (The dead assignment of ll inside this loop, and its stale entry in
     the OpenMP private clause, have been removed: the inner recurrence
     only uses the precomputed fac arrays.) */
#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l)             \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    dlm1=1.0/sqrt(2.); /* l=0 */
    d00[index_mu][0]=dlm1*sqrt(2.);
    dl=mu[index_mu] * sqrt(3./2.); /* l=1 */
    d00[index_mu][1]=dl*sqrt(2./3.);
    for(l=1;l<lmax;l++){
      /* sqrt((2l+1)/2)*d00 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*mu[index_mu]*dl - fac2[l]*dlm1;
      d00[index_mu][l+1] = dlp1 * fac3[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1); free(fac2); free(fac3);

  return _SUCCESS_;
}
/**
* This routine computes the d11 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d11 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d11(
                double * mu,
                int num_mu,
                int lmax,
                double ** d11
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* Precompute the l-dependent recurrence coefficients once, instead of
     recomputing them inside the loop over mu values. */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);
  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/(ll*(ll+2));
    fac2[l] = 1.0/(ll*(ll+1.));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-1)*(ll+1)/(ll*(ll+2))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

  /* Recurrence on sqrt((2l+1)/2)*d11 for numerical stability; d11
     vanishes at l=0, and the l=1,2 seeds are evaluated in closed form.
     (Removed a dead assignment of ll inside this loop and its stale
     entry in the OpenMP private clause.) */
#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l)             \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    d11[index_mu][0]=0;
    dlm1=(1.0+mu[index_mu])/2. * sqrt(3./2.); /*l=1*/
    d11[index_mu][1]=dlm1 * sqrt(2./3.);
    dl=(1.0+mu[index_mu])/2.*(2.0*mu[index_mu]-1.0) * sqrt(5./2.); /*l=2*/
    d11[index_mu][2] = dl * sqrt(2./5.);
    for(l=2;l<lmax;l++){
      /* sqrt((2l+1)/2)*d11 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
      d11[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1); free(fac2); free(fac3); free(fac4);

  return _SUCCESS_;
}
/**
* This routine computes the d1m1 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d1m1 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d1m1(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d1m1
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* Precompute the l-dependent recurrence coefficients once, instead of
     recomputing them inside the loop over mu values. */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);
  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/(ll*(ll+2));
    fac2[l] = 1.0/(ll*(ll+1.));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-1)*(ll+1)/(ll*(ll+2))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

  /* Recurrence on sqrt((2l+1)/2)*d1m1 for numerical stability; the only
     difference with d11 is the sign of mu in the seeds and of fac2 in
     the recurrence. (Removed a dead assignment of ll inside this loop
     and its stale entry in the OpenMP private clause.) */
#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l)             \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    d1m1[index_mu][0]=0;
    dlm1=(1.0-mu[index_mu])/2. * sqrt(3./2.); /*l=1*/
    d1m1[index_mu][1]=dlm1 * sqrt(2./3.);
    dl=(1.0-mu[index_mu])/2.*(2.0*mu[index_mu]+1.0) * sqrt(5./2.); /*l=2*/
    d1m1[index_mu][2] = dl * sqrt(2./5.);
    for(l=2;l<lmax;l++){
      /* sqrt((2l+1)/2)*d1m1 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d1m1[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1); free(fac2); free(fac3); free(fac4);

  return _SUCCESS_;
}
/**
* This routine computes the d2m2 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d2m2 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d2m2(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d2m2
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* Precompute the l-dependent recurrence coefficients once, instead of
     recomputing them inside the loop over mu values. */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);
  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/((ll-1)*(ll+3));
    fac2[l] = 4.0/(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-2)*(ll+2)/((ll-1)*(ll+3))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

  /* Recurrence on sqrt((2l+1)/2)*d2m2 for numerical stability; d2m2
     vanishes for l<2 and the l=2 seed is evaluated in closed form.
     (Removed a dead assignment of ll inside this loop and its stale
     entry in the OpenMP private clause.) */
#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l)             \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    d2m2[index_mu][0]=0;
    dlm1=0.; /*l=1*/
    d2m2[index_mu][1]=0;
    dl=(1.0-mu[index_mu])*(1.0-mu[index_mu])/4. * sqrt(5./2.); /*l=2*/
    d2m2[index_mu][2] = dl * sqrt(2./5.);
    for(l=2;l<lmax;l++){
      /* sqrt((2l+1)/2)*d2m2 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d2m2[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1); free(fac2); free(fac3); free(fac4);

  return _SUCCESS_;
}
/**
* This routine computes the d22 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d22 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d22(
                double * mu,
                int num_mu,
                int lmax,
                double ** d22
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* Precompute the l-dependent recurrence coefficients once, instead of
     recomputing them inside the loop over mu values.
     The coefficients coincide with those of d2m2; only the sign of fac2
     in the recurrence and the l=2 seed differ. */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);
  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/((ll-1)*(ll+3));
    fac2[l] = 4.0/(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-2)*(ll+2)/((ll-1)*(ll+3))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

  /* Recurrence on sqrt((2l+1)/2)*d22 for numerical stability.
     (Removed a dead assignment of ll inside this loop and its stale
     entry in the OpenMP private clause.) */
#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l)             \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    d22[index_mu][0]=0;
    dlm1=0.; /*l=1*/
    d22[index_mu][1]=0;
    dl=(1.0+mu[index_mu])*(1.0+mu[index_mu])/4. * sqrt(5./2.); /*l=2*/
    d22[index_mu][2] = dl * sqrt(2./5.);
    for(l=2;l<lmax;l++){
      /* sqrt((2l+1)/2)*d22 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
      d22[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1); free(fac2); free(fac3); free(fac4);

  return _SUCCESS_;
}
/**
* This routine computes the d20 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d20 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d20(
                double * mu,
                int num_mu,
                int lmax,
                double ** d20
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac3, *fac4;
  ErrorMsg erreur;

  /* Precompute the l-dependent recurrence coefficients once, instead of
     recomputing them inside the loop over mu values (no fac2 here: the
     m*m'/... term of the general recurrence vanishes for m'=0). */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);
  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-1)*(ll+3)));
    fac3[l] = sqrt((2*ll+3)*(ll-2)*(ll+2)/((2*ll-1)*(ll-1)*(ll+3)));
    fac4[l] = sqrt(2./(2*ll+3));
  }

  /* Recurrence on sqrt((2l+1)/2)*d20 for numerical stability; d20
     vanishes for l<2 and the l=2 seed is evaluated in closed form.
     (Removed a dead assignment of ll inside this loop and its stale
     entry in the OpenMP private clause.) */
#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l)             \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    d20[index_mu][0]=0;
    dlm1=0.; /*l=1*/
    d20[index_mu][1]=0;
    dl=sqrt(15.)/4.*(1-mu[index_mu]*mu[index_mu]); /*l=2*/
    d20[index_mu][2] = dl * sqrt(2./5.);
    for(l=2;l<lmax;l++){
      /* sqrt((2l+1)/2)*d20 recurrence, supposed to be more stable
         (comment previously said "d22": copy-paste error fixed) */
      dlp1 = fac1[l]*mu[index_mu]*dl - fac3[l]*dlm1;
      d20[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1); free(fac3); free(fac4);

  return _SUCCESS_;
}
/**
* This routine computes the d31 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d31 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d31(
double * mu,
int num_mu,
int lmax,
double ** d31
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
/* l-dependent recurrence coefficients, precomputed once */
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* three-term-recurrence coefficients (Kostelec & Rockmore 2003);
   fac4 removes the sqrt((2l+1)/2) rescaling carried for stability */
for (l=3;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-2)*(ll+4)*ll*(ll+2))) * (ll+1);
fac2[l] = 3.0/(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1)*(ll-3)*(ll+3)*(ll-1)*(ll+1)/((ll-2)*(ll+4)*ll*(ll+2)))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* d^l_{31} vanishes identically for l < 3 */
d31[index_mu][0]=0;
d31[index_mu][1]=0;
dlm1=0.; /*l=2*/
d31[index_mu][2]=0;
/* closed-form seed at l=3, carried rescaled by sqrt(7/2) */
dl=sqrt(105./2.)*(1+mu[index_mu])*(1+mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
d31[index_mu][3] = dl * sqrt(2./7.);
for(l=3;l<lmax;l++){
ll=(double) l;
/* upward sqrt((2l+1)/2)*d31 recurrence, supposed to be more stable */
dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
d31[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d3m1 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d3m1 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d3m1(
double * mu,
int num_mu,
int lmax,
double ** d3m1
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
/* l-dependent recurrence coefficients, precomputed once
   (identical tables to lensing_d31; only the sign of the fac2
   term and the l=3 seed differ, since m' = -1 here) */
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
for (l=3;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-2)*(ll+4)*ll*(ll+2))) * (ll+1);
fac2[l] = 3.0/(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1)*(ll-3)*(ll+3)*(ll-1)*(ll+1)/((ll-2)*(ll+4)*ll*(ll+2)))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* d^l_{3-1} vanishes identically for l < 3 */
d3m1[index_mu][0]=0;
d3m1[index_mu][1]=0;
dlm1=0.; /*l=2*/
d3m1[index_mu][2]=0;
/* closed-form seed at l=3, carried rescaled by sqrt(7/2) */
dl=sqrt(105./2.)*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
d3m1[index_mu][3] = dl * sqrt(2./7.);
for(l=3;l<lmax;l++){
ll=(double) l;
/* upward sqrt((2l+1)/2)*d3m1 recurrence, supposed to be more stable;
   note the + sign in front of fac2 (m*m' < 0) */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d3m1[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d3m3 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d3m3 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d3m3(
    double * mu,
    int num_mu,
    int lmax,
    double ** d3m3
    ) {
  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* l-dependent three-term-recurrence coefficients, precomputed once
     (Kostelec & Rockmore 2003); fac4 removes the sqrt((2l+1)/2)
     rescaling carried for numerical stability. */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);
  for (l=3;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1))*(ll+1)/((ll-2)*(ll+4));
    fac2[l] = 9.0/(ll*(ll+1));
    /* consistency fix: use the double ll instead of the int l in the
       (ll+1) factor, as in every other lensing_d* coefficient table
       (numerically identical — the int was promoted to double anyway) */
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-3)*(ll+3)*(ll+1)/((ll-2)*(ll+4)*ll);
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for \
  private (index_mu,dlm1,dl,dlp1,l,ll) \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    /* d^l_{3-3} vanishes identically for l < 3 */
    d3m3[index_mu][0]=0;
    d3m3[index_mu][1]=0;
    dlm1=0.; /*l=2*/
    d3m3[index_mu][2]=0;
    /* closed-form seed at l=3, carried rescaled by sqrt(7/2) */
    dl=sqrt(7./2.)*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
    d3m3[index_mu][3] = dl * sqrt(2./7.);
    for(l=3;l<lmax;l++){
      ll=(double) l;
      /* upward sqrt((2l+1)/2)*d3m3 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d3m3[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }
  free(fac1); free(fac2); free(fac3); free(fac4);
  return _SUCCESS_;
}
/**
* This routine computes the d40 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d40 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d40(
                double * mu,
                int num_mu,
                int lmax,
                double ** d40
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *rec_a, *rec_b, *rescale;
  ErrorMsg erreur;

  /* Precompute the l-dependent recurrence coefficients once;
     rescale[] removes the sqrt((2l+1)/2) factor carried for stability. */
  class_alloc(rec_a,lmax*sizeof(double),erreur);
  class_alloc(rec_b,lmax*sizeof(double),erreur);
  class_alloc(rescale,lmax*sizeof(double),erreur);
  for (l=4; l<lmax; l++) {
    ll = (double) l;
    rec_a[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-3)*(ll+5)));
    rec_b[l] = sqrt((2*ll+3)*(ll-4)*(ll+4)/((2*ll-1)*(ll-3)*(ll+5)));
    rescale[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0; index_mu<num_mu; index_mu++) {
    /* d^l_{40} vanishes identically for l < 4 */
    d40[index_mu][0] = 0;
    d40[index_mu][1] = 0;
    d40[index_mu][2] = 0;
    dlm1 = 0.; /*l=3*/
    d40[index_mu][3] = 0;
    /* closed-form seed at l=4, carried rescaled by sqrt(9/2) */
    dl = sqrt(315.)*(1+mu[index_mu])*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
    d40[index_mu][4] = dl * sqrt(2./9.);
    /* upward recurrence on sqrt((2l+1)/2)*d^l_{40} */
    for (l=4; l<lmax; l++) {
      ll = (double) l;
      dlp1 = rec_a[l]*mu[index_mu]*dl - rec_b[l]*dlm1;
      d40[index_mu][l+1] = dlp1 * rescale[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }
  free(rec_a); free(rec_b); free(rescale);
  return _SUCCESS_;
}
/**
* This routine computes the d4m2 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d4m2 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d4m2(
double * mu,
int num_mu,
int lmax,
double ** d4m2
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
/* l-dependent recurrence coefficients, precomputed once */
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* three-term-recurrence coefficients (Kostelec & Rockmore 2003);
   fac4 removes the sqrt((2l+1)/2) rescaling carried for stability */
for (l=4;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-3)*(ll+5)*(ll-1)*(ll+3))) * (ll+1.);
fac2[l] = 8./(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)*(ll-4)*(ll+4)*(ll-2)*(ll+2)/((2*ll-1)*(ll-3)*(ll+5)*(ll-1)*(ll+3)))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* d^l_{4-2} vanishes identically for l < 4 */
d4m2[index_mu][0]=0;
d4m2[index_mu][1]=0;
d4m2[index_mu][2]=0;
dlm1=0.; /*l=3*/
d4m2[index_mu][3]=0;
/* closed-form seed at l=4, carried rescaled by sqrt(9/2) */
dl=sqrt(126.)*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
d4m2[index_mu][4] = dl * sqrt(2./9.);
for(l=4;l<lmax;l++){
ll=(double) l;
/* upward sqrt((2l+1)/2)*d4m2 recurrence, supposed to be more stable;
   + sign in front of fac2 since m*m' < 0 */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d4m2[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d4m4 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d4m4 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d4m4(
double * mu,
int num_mu,
int lmax,
double ** d4m4
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
/* l-dependent recurrence coefficients, precomputed once */
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
/* three-term-recurrence coefficients (Kostelec & Rockmore 2003);
   fac4 removes the sqrt((2l+1)/2) rescaling carried for stability */
for (l=4;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1))*(ll+1)/((ll-3)*(ll+5));
fac2[l] = 16./(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-4)*(ll+4)*(ll+1)/((ll-3)*(ll+5)*ll);
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
/* d^l_{4-4} vanishes identically for l < 4 */
d4m4[index_mu][0]=0;
d4m4[index_mu][1]=0;
d4m4[index_mu][2]=0;
dlm1=0.; /*l=3*/
d4m4[index_mu][3]=0;
/* closed-form seed at l=4, carried rescaled by sqrt(9/2) */
dl=sqrt(9./2.)*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
d4m4[index_mu][4] = dl * sqrt(2./9.);
for(l=4;l<lmax;l++){
ll=(double) l;
/* upward sqrt((2l+1)/2)*d4m4 recurrence, supposed to be more stable */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d4m4[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
|
qnx_fmt_plug.c | /*
* This file is part of John the Ripper password cracker. Written to crack
* QNX shadow hash passwords. algorith is func(salt . pass x rounds+1)
* func is md5, sha256 or sha512. rounds defaults to 1000, BUT can be specified
* in the hash string and thus is not fixed.
*
* This software is Copyright (c) 2015 JimF, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_qnx;
#elif FMT_REGISTERS_H
john_register_one(&fmt_qnx);
#else
#include "arch.h"
#undef SIMD_COEF_32
#define FORCE_GENERIC_SHA2 1
#include "sha2.h"
#include "md5.h"
#define _GNU_SOURCE 1
#include <string.h>
#include "params.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#include <omp.h>
#endif
#include "memdbg.h"
// NOTE, in SSE mode, even if NOT in OMP, we may need to scale, quite a bit, due to needing
// to 'group' passwords based upon length of password.
#ifdef SIMD_COEF_32
#ifdef _OPENMP
#define SIMD_COEF_SCALE (128/SIMD_COEF_32)
#else
#define SIMD_COEF_SCALE (256/SIMD_COEF_32)
#endif
#else
#define SIMD_COEF_SCALE 1
#endif
#define FORMAT_LABEL "qnx"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#define PLAINTEXT_LENGTH 48
#define SALT_SIZE sizeof(struct qnx_saltstruct)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define __QNX_CREATE_PROPER_TESTS_ARRAY__
#include "qnx_common.h"
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
#ifdef SIMD_COEF_32
static int *(sk_by_len[PLAINTEXT_LENGTH+1]);
static int sk_by_lens[PLAINTEXT_LENGTH+1];
#endif
/* Parsed salt; the active one is selected via set_salt() and read by
   crypt_all()/cmp_one(). */
static struct qnx_saltstruct {
unsigned int len; /* number of bytes used in salt[] */
unsigned int type; // 5 for md5, 256 for sha256, 512 for sha512
unsigned int rounds; /* iteration count (ROUNDS_DEFAULT when absent) */
unsigned char salt[SALT_LENGTH];
} *cur_salt;
/* One-time format setup: scale max_keys_per_crypt by the OpenMP thread
   count and the SIMD grouping factor, then allocate per-key buffers. */
static void init(struct fmt_main *self)
{
int omp_t = 1;
int max_crypts;
#ifdef _OPENMP
omp_t = omp_get_max_threads();
omp_t *= OMP_SCALE;
#endif
max_crypts = SIMD_COEF_SCALE * omp_t * MAX_KEYS_PER_CRYPT;
self->params.max_keys_per_crypt = max_crypts;
// we allocate 1 more than needed, and use that 'extra' value as a zero
// length PW to fill in the tail groups in MMX mode.
saved_len = mem_calloc(1 + max_crypts, sizeof(*saved_len));
saved_key = mem_calloc(1 + max_crypts, sizeof(*saved_key));
crypt_out = mem_calloc(1 + max_crypts, sizeof(*crypt_out));
#ifdef SIMD_COEF_32
/* NOTE: omp_t is reused here as a plain loop index over key lengths;
   these per-length index tables are consumed by set_key()/crypt_all() */
for (omp_t = 1; omp_t <= PLAINTEXT_LENGTH; ++omp_t)
sk_by_len[omp_t] = mem_calloc(1+max_crypts, sizeof(int));
#endif
}
/* Tear-down counterpart of init(): release every buffer init() allocated.
   Fix: the per-length index tables sk_by_len[1..PLAINTEXT_LENGTH]
   allocated in init() under SIMD_COEF_32 were previously leaked. */
static void done(void)
{
#ifdef SIMD_COEF_32
	int i;

	for (i = 1; i <= PLAINTEXT_LENGTH; ++i)
		MEM_FREE(sk_by_len[i]);
#endif
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
	MEM_FREE(saved_len);
}
/* Called by the core before a fresh batch of set_key() calls; in SIMD
   builds, reset the per-length bucket counters filled by set_key(). */
static void clear_keys(void) {
#ifdef SIMD_COEF_32
memset(sk_by_lens, 0, sizeof(sk_by_lens));
#endif
}
/* Hash-table lookup helpers: successively wider masks applied to the
   first 32-bit word of the computed binary. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Store candidate password number 'index', truncated to PLAINTEXT_LENGTH
   and NUL-terminated; in SIMD builds, also record the index in the
   bucket for its length so crypt_all() can batch equal-length keys. */
static void set_key(char *key, int index)
{
	int len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	saved_len[index] = len;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
#ifdef SIMD_COEF_32
	sk_by_len[len][sk_by_lens[len]++] = index;
#endif
}
/* Return the stored candidate; re-terminate at the recorded length
   before handing the buffer back. */
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
/* Hash every queued candidate under cur_salt:
   hash = func(salt . pass repeated rounds+1 times), func selected by
   cur_salt->type (5=MD5, 256=SHA-256, 512=SHA-512).
   NOTE(review): the SIMD path below is unfinished; it is dead code in
   this build because SIMD_COEF_32 is #undef'd at the top of the file. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
int tot_todo = count, inc = 1, *MixOrder = NULL;
#ifdef SIMD_COEF_32
int usesse = 0;
if (cur_salt->type == 5) {
usesse = 1;
}
#ifdef SIMD_PARA_SHA256
if (cur_salt->type == 256) {
usesse = 1;
}
#endif
#ifdef SIMD_PARA_SHA512
if (cur_salt->type == 512)
usesse = 1;
#endif
if (usesse) {
int j, k;
/* group same-length keys so each SIMD batch shares one length */
MixOrder = (int*)mem_calloc((count+PLAINTEXT_LENGTH*MAX_KEYS_PER_CRYPT), sizeof(int));
tot_todo = 0;
saved_len[count] = 0; // point all 'tail' MMX buffer elements to this location.
/* NOTE(review): bound skips keys of exactly PLAINTEXT_LENGTH — verify */
for (j = 1; j < PLAINTEXT_LENGTH; ++j) {
for (k = 0; k < sk_by_lens[j]; ++k)
/* NOTE(review): assigns the int* sk_by_len[k] to an int slot;
   presumably sk_by_len[j][k] was intended — must be fixed before
   SIMD_COEF_32 is ever enabled */
MixOrder[tot_todo++] = sk_by_len[k];
while (tot_todo % MAX_KEYS_PER_CRYPT)
MixOrder[tot_todo++] = count;
}
}
#endif
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < tot_todo; index += inc)
{
#ifdef SIMD_COEF_32
if (MixOrder) {
int len, len_tot=0;
/* NOTE(review): empty switch — the SIMD kernels were never written;
   this would not even compile with SIMD_COEF_32 defined */
switch(cur_salt->type) {
case 5:
case 256:
case 512:
}
} else
#endif
{
int i, len = saved_len[index];
char *pass = saved_key[index];
switch (cur_salt->type) {
case 5:
{
/* MD5(salt . pass x (rounds+1)) */
MD5_CTX ctx;
MD5_Init(&ctx);
MD5_Update(&ctx, cur_salt->salt, cur_salt->len);
for (i = 0; i <= cur_salt->rounds; ++i)
MD5_Update(&ctx, pass, len);
MD5_Final((unsigned char*)(crypt_out[index]), &ctx);
break;
}
case 256:
{
/* SHA256(salt . pass x (rounds+1)) */
SHA256_CTX ctx;
SHA256_Init(&ctx);
SHA256_Update(&ctx, cur_salt->salt, cur_salt->len);
for (i = 0; i <= cur_salt->rounds; ++i)
SHA256_Update(&ctx, pass, len);
SHA256_Final((unsigned char*)(crypt_out[index]), &ctx);
break;
}
case 512:
{
SHA512_CTX ctx;
SHA512_Init(&ctx);
SHA512_Update(&ctx, cur_salt->salt, cur_salt->len);
/* Fast path: when the password length divides the 128-byte SHA-512
   block, the buffer content repeats, so after priming the buffer we
   can re-hash the same block repeatedly and only bump the length
   counter, instead of re-filling the buffer every round. */
if (len && 128 % len == 0 && cur_salt->len+len*cur_salt->rounds > 256) {
// we can optimize this, by filling buffer (after the
// first salted buffer), and then simply calling
// jtr_sha512_hash_block 'natively' never having to
// refill the buffer again.
int ex;
for (i = 0; i <= cur_salt->rounds; ++i) {
SHA512_Update(&ctx, pass, len);
if (ctx.total > 128+cur_salt->len)
break;
}
++i;
ex = (256-ctx.total)/len;
i += ex;
ctx.total += ex*len;
jtr_sha512_hash_block(&ctx, ctx.buffer, 1);
while (i+128/len <= cur_salt->rounds) {
ctx.total += 128;
jtr_sha512_hash_block(&ctx, ctx.buffer, 1);
i += 128/len;
}
for (;i <= cur_salt->rounds; ++i)
ctx.total += len;
} else {
for (i = 0; i <= cur_salt->rounds; ++i)
SHA512_Update(&ctx, pass, len);
}
/* project-specific flag; presumably makes SHA512_Final emulate
   QNX's buggy finalization — see the bundled sha2.h */
ctx.bIsQnxBuggy = 1;
SHA512_Final((unsigned char*)(crypt_out[index]), &ctx);
break;
}
default:
exit(fprintf(stderr, "Unknown QNX hash type found\n"));
}
}
}
MEM_FREE(MixOrder);
return count;
}
/* Make the given salt current for subsequent crypt_all()/cmp_*() calls. */
static void set_salt(void *salt)
{
cur_salt = salt;
}
static void *get_salt(char *ciphertext)
{
static struct qnx_saltstruct out;
char *origptr = strdup(ciphertext), *ct = origptr;
memset(&out, 0, sizeof(out));
ct = strtokm(&ct[1], "@");
if (*ct == 'm') out.type = 5;
else if (*ct == 's') out.type = 256;
else if (*ct == 'S') out.type = 512;
if (ct[1] == ',')
out.rounds = atoi(&ct[2]);
else
out.rounds = ROUNDS_DEFAULT;
ct = strtokm(NULL, "@");
ct = strtokm(NULL, "@");
out.len = strlen(ct);
memcpy(out.salt, ct, out.len);
MEM_FREE(origptr);
return &out;
}
/* Quick reject across the whole batch: does any computed hash match the
   target binary in its first ARCH_SIZE bytes? */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (memcmp(binary, crypt_out[i], ARCH_SIZE) == 0)
			return 1;
	}
	return 0;
}
/* Full comparison for one candidate; the number of significant binary
   bytes depends on the hash type of the current salt. */
static int cmp_one(void *binary, int index)
{
	size_t n;

	switch (cur_salt->type) {
	case 5:
		n = BINARY_SIZE_MD5;
		break;
	case 256:
		n = BINARY_SIZE_SHA256;
		break;
	default:
		n = BINARY_SIZE;
		break;
	}
	return memcmp(binary, crypt_out[index], n) == 0;
}
/* All the comparison work is already done in cmp_all()/cmp_one(),
   so there is nothing further to verify here. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Tunable-cost hook 1: per-salt iteration count. */
static unsigned int iteration_count(void *salt)
{
return ((struct qnx_saltstruct *)salt)->rounds;
}
/* Tunable-cost hook 2: per-salt algorithm id (5/256/512). */
static unsigned int algorithm_type(void *salt)
{
return ((struct qnx_saltstruct *)salt)->type;
}
// Public domain hash function by DJ Bernstein
// We are hashing the entire struct
static int salt_hash(void *salt)
{
	const unsigned char *bytes = (const unsigned char *)salt;
	unsigned int h = 5381;
	size_t n;

	/* djb2 variant: h = h*33 ^ byte (h*33 == (h<<5)+h) */
	for (n = 0; n < sizeof(struct qnx_saltstruct); n++)
		h = (h * 33) ^ bytes[n];
	return h & (SALT_HASH_SIZE - 1);
}
/* Format registration: static parameters, self-tests and method table. */
struct fmt_main fmt_qnx = {
{
FORMAT_LABEL,
FORMAT_NAME,
"QNX " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
/* tunable cost names, reported by iteration_count()/algorithm_type() */
"iteration count",
"algorithm (5=md5 256=sha256 512=sha512)",
},
tests
}, {
/* method table: hooks invoked by the cracker core */
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
iteration_count,
algorithm_type,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two struct timeval values.
 * Y is normalized (mutated) in the process, matching the classic
 * GNU libc manual implementation this is derived from.
 * Returns 1 if the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that 0 <= x->tv_usec - y->tv_usec < 1000000. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* tv_usec is certainly positive now. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 8;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
convolution_1x1_pack8_fp16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_transform_kernel_fp16_pack8_avx(const Mat& kernel, Mat& weight_data_pack8, int num_input, int num_output)
{
// src = kw-kh-inch-outch
// dst = 8b-8a-kw-kh-inch/8a-outch/8b
Mat weight_data_r2 = kernel.reshape(1, num_input, num_output);
weight_data_pack8.create(1, num_input / 8, num_output / 8, (size_t)2 * 64, 64);
for (int q = 0; q + 7 < num_output; q += 8)
{
const Mat k0 = weight_data_r2.channel(q);
const Mat k1 = weight_data_r2.channel(q + 1);
const Mat k2 = weight_data_r2.channel(q + 2);
const Mat k3 = weight_data_r2.channel(q + 3);
const Mat k4 = weight_data_r2.channel(q + 4);
const Mat k5 = weight_data_r2.channel(q + 5);
const Mat k6 = weight_data_r2.channel(q + 6);
const Mat k7 = weight_data_r2.channel(q + 7);
Mat g0 = weight_data_pack8.channel(q / 8);
for (int p = 0; p + 7 < num_input; p += 8)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k04 = k0.row(p + 4);
const float* k05 = k0.row(p + 5);
const float* k06 = k0.row(p + 6);
const float* k07 = k0.row(p + 7);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k14 = k1.row(p + 4);
const float* k15 = k1.row(p + 5);
const float* k16 = k1.row(p + 6);
const float* k17 = k1.row(p + 7);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k24 = k2.row(p + 4);
const float* k25 = k2.row(p + 5);
const float* k26 = k2.row(p + 6);
const float* k27 = k2.row(p + 7);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k34 = k3.row(p + 4);
const float* k35 = k3.row(p + 5);
const float* k36 = k3.row(p + 6);
const float* k37 = k3.row(p + 7);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k44 = k4.row(p + 4);
const float* k45 = k4.row(p + 5);
const float* k46 = k4.row(p + 6);
const float* k47 = k4.row(p + 7);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k54 = k5.row(p + 4);
const float* k55 = k5.row(p + 5);
const float* k56 = k5.row(p + 6);
const float* k57 = k5.row(p + 7);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k64 = k6.row(p + 4);
const float* k65 = k6.row(p + 5);
const float* k66 = k6.row(p + 6);
const float* k67 = k6.row(p + 7);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
const float* k74 = k7.row(p + 4);
const float* k75 = k7.row(p + 5);
const float* k76 = k7.row(p + 6);
const float* k77 = k7.row(p + 7);
unsigned short* g00 = (unsigned short*)g0.row(p / 8);
g00[0] = float32_to_float16(k00[0]);
g00[1] = float32_to_float16(k10[0]);
g00[2] = float32_to_float16(k20[0]);
g00[3] = float32_to_float16(k30[0]);
g00[4] = float32_to_float16(k40[0]);
g00[5] = float32_to_float16(k50[0]);
g00[6] = float32_to_float16(k60[0]);
g00[7] = float32_to_float16(k70[0]);
g00 += 8;
g00[0] = float32_to_float16(k01[0]);
g00[1] = float32_to_float16(k11[0]);
g00[2] = float32_to_float16(k21[0]);
g00[3] = float32_to_float16(k31[0]);
g00[4] = float32_to_float16(k41[0]);
g00[5] = float32_to_float16(k51[0]);
g00[6] = float32_to_float16(k61[0]);
g00[7] = float32_to_float16(k71[0]);
g00 += 8;
g00[0] = float32_to_float16(k02[0]);
g00[1] = float32_to_float16(k12[0]);
g00[2] = float32_to_float16(k22[0]);
g00[3] = float32_to_float16(k32[0]);
g00[4] = float32_to_float16(k42[0]);
g00[5] = float32_to_float16(k52[0]);
g00[6] = float32_to_float16(k62[0]);
g00[7] = float32_to_float16(k72[0]);
g00 += 8;
g00[0] = float32_to_float16(k03[0]);
g00[1] = float32_to_float16(k13[0]);
g00[2] = float32_to_float16(k23[0]);
g00[3] = float32_to_float16(k33[0]);
g00[4] = float32_to_float16(k43[0]);
g00[5] = float32_to_float16(k53[0]);
g00[6] = float32_to_float16(k63[0]);
g00[7] = float32_to_float16(k73[0]);
g00 += 8;
g00[0] = float32_to_float16(k04[0]);
g00[1] = float32_to_float16(k14[0]);
g00[2] = float32_to_float16(k24[0]);
g00[3] = float32_to_float16(k34[0]);
g00[4] = float32_to_float16(k44[0]);
g00[5] = float32_to_float16(k54[0]);
g00[6] = float32_to_float16(k64[0]);
g00[7] = float32_to_float16(k74[0]);
g00 += 8;
g00[0] = float32_to_float16(k05[0]);
g00[1] = float32_to_float16(k15[0]);
g00[2] = float32_to_float16(k25[0]);
g00[3] = float32_to_float16(k35[0]);
g00[4] = float32_to_float16(k45[0]);
g00[5] = float32_to_float16(k55[0]);
g00[6] = float32_to_float16(k65[0]);
g00[7] = float32_to_float16(k75[0]);
g00 += 8;
g00[0] = float32_to_float16(k06[0]);
g00[1] = float32_to_float16(k16[0]);
g00[2] = float32_to_float16(k26[0]);
g00[3] = float32_to_float16(k36[0]);
g00[4] = float32_to_float16(k46[0]);
g00[5] = float32_to_float16(k56[0]);
g00[6] = float32_to_float16(k66[0]);
g00[7] = float32_to_float16(k76[0]);
g00 += 8;
g00[0] = float32_to_float16(k07[0]);
g00[1] = float32_to_float16(k17[0]);
g00[2] = float32_to_float16(k27[0]);
g00[3] = float32_to_float16(k37[0]);
g00[4] = float32_to_float16(k47[0]);
g00[5] = float32_to_float16(k57[0]);
g00[6] = float32_to_float16(k67[0]);
g00[7] = float32_to_float16(k77[0]);
g00 += 8;
}
}
}
static void conv1x1s1_sgemm_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator);
{
int nn_size = size / 12;
int remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 12;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
__m256 _r8 = _mm256_loadu_ps(img0 + 64);
__m256 _r9 = _mm256_loadu_ps(img0 + 72);
__m256 _r10 = _mm256_loadu_ps(img0 + 80);
__m256 _r11 = _mm256_loadu_ps(img0 + 88);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
_mm256_storeu_ps(tmpptr + 64, _r8);
_mm256_storeu_ps(tmpptr + 72, _r9);
_mm256_storeu_ps(tmpptr + 80, _r10);
_mm256_storeu_ps(tmpptr + 88, _r11);
tmpptr += 96;
img0 += bottom_blob.cstep * 8;
}
}
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
tmpptr += 64;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
tmpptr += 32;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
tmpptr += 16;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
_mm256_storeu_ps(tmpptr, _r0);
tmpptr += 8;
img0 += bottom_blob.cstep * 8;
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
float* outptr = out;
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
__m256 _sum8 = _bias0;
__m256 _sum9 = _bias0;
__m256 _sum10 = _bias0;
__m256 _sum11 = _bias0;
const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = loadfp16(kptr);
__m256 _w1 = loadfp16(kptr + 8);
__m256 _w2 = loadfp16(kptr + 16);
__m256 _w3 = loadfp16(kptr + 24);
__m256 _w4 = loadfp16(kptr + 32);
__m256 _w5 = loadfp16(kptr + 40);
__m256 _w6 = loadfp16(kptr + 48);
__m256 _w7 = loadfp16(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_comp_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w7, _val37, _sum3);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_sum4 = _mm256_comp_fmadd_ps(_w0, _val40, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w1, _val41, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w2, _val42, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w3, _val43, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w4, _val44, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w5, _val45, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w6, _val46, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w7, _val47, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_w0, _val50, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w1, _val51, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w2, _val52, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w3, _val53, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w4, _val54, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w5, _val55, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w6, _val56, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w7, _val57, _sum5);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_sum6 = _mm256_comp_fmadd_ps(_w0, _val60, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w1, _val61, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w2, _val62, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w3, _val63, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w4, _val64, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w5, _val65, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w6, _val66, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w7, _val67, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_w0, _val70, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w1, _val71, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w2, _val72, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w3, _val73, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w4, _val74, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w5, _val75, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w6, _val76, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w7, _val77, _sum7);
__m256 _val80 = _mm256_broadcast_ss(tmpptr + 64);
__m256 _val81 = _mm256_broadcast_ss(tmpptr + 65);
__m256 _val82 = _mm256_broadcast_ss(tmpptr + 66);
__m256 _val83 = _mm256_broadcast_ss(tmpptr + 67);
__m256 _val84 = _mm256_broadcast_ss(tmpptr + 68);
__m256 _val85 = _mm256_broadcast_ss(tmpptr + 69);
__m256 _val86 = _mm256_broadcast_ss(tmpptr + 70);
__m256 _val87 = _mm256_broadcast_ss(tmpptr + 71);
__m256 _val90 = _mm256_broadcast_ss(tmpptr + 72);
__m256 _val91 = _mm256_broadcast_ss(tmpptr + 73);
__m256 _val92 = _mm256_broadcast_ss(tmpptr + 74);
__m256 _val93 = _mm256_broadcast_ss(tmpptr + 75);
__m256 _val94 = _mm256_broadcast_ss(tmpptr + 76);
__m256 _val95 = _mm256_broadcast_ss(tmpptr + 77);
__m256 _val96 = _mm256_broadcast_ss(tmpptr + 78);
__m256 _val97 = _mm256_broadcast_ss(tmpptr + 79);
_sum8 = _mm256_comp_fmadd_ps(_w0, _val80, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w1, _val81, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w2, _val82, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w3, _val83, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w4, _val84, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w5, _val85, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w6, _val86, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w7, _val87, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_w0, _val90, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w1, _val91, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w2, _val92, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w3, _val93, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w4, _val94, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w5, _val95, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w6, _val96, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w7, _val97, _sum9);
__m256 _val100 = _mm256_broadcast_ss(tmpptr + 80);
__m256 _val101 = _mm256_broadcast_ss(tmpptr + 81);
__m256 _val102 = _mm256_broadcast_ss(tmpptr + 82);
__m256 _val103 = _mm256_broadcast_ss(tmpptr + 83);
__m256 _val104 = _mm256_broadcast_ss(tmpptr + 84);
__m256 _val105 = _mm256_broadcast_ss(tmpptr + 85);
__m256 _val106 = _mm256_broadcast_ss(tmpptr + 86);
__m256 _val107 = _mm256_broadcast_ss(tmpptr + 87);
__m256 _val110 = _mm256_broadcast_ss(tmpptr + 88);
__m256 _val111 = _mm256_broadcast_ss(tmpptr + 89);
__m256 _val112 = _mm256_broadcast_ss(tmpptr + 90);
__m256 _val113 = _mm256_broadcast_ss(tmpptr + 91);
__m256 _val114 = _mm256_broadcast_ss(tmpptr + 92);
__m256 _val115 = _mm256_broadcast_ss(tmpptr + 93);
__m256 _val116 = _mm256_broadcast_ss(tmpptr + 94);
__m256 _val117 = _mm256_broadcast_ss(tmpptr + 95);
_sum10 = _mm256_comp_fmadd_ps(_w0, _val100, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w1, _val101, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w2, _val102, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w3, _val103, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w4, _val104, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w5, _val105, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w6, _val106, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w7, _val107, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_w0, _val110, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w1, _val111, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w2, _val112, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w3, _val113, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w4, _val114, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w5, _val115, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w6, _val116, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w7, _val117, _sum11);
tmpptr += 96;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
_mm256_storeu_ps(outptr + 64, _sum8);
_mm256_storeu_ps(outptr + 72, _sum9);
_mm256_storeu_ps(outptr + 80, _sum10);
_mm256_storeu_ps(outptr + 88, _sum11);
outptr += 96;
}
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = loadfp16(kptr);
__m256 _w1 = loadfp16(kptr + 8);
__m256 _w2 = loadfp16(kptr + 16);
__m256 _w3 = loadfp16(kptr + 24);
__m256 _w4 = loadfp16(kptr + 32);
__m256 _w5 = loadfp16(kptr + 40);
__m256 _w6 = loadfp16(kptr + 48);
__m256 _w7 = loadfp16(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_comp_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w7, _val37, _sum3);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_sum4 = _mm256_comp_fmadd_ps(_w0, _val40, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w1, _val41, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w2, _val42, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w3, _val43, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w4, _val44, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w5, _val45, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w6, _val46, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w7, _val47, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_w0, _val50, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w1, _val51, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w2, _val52, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w3, _val53, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w4, _val54, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w5, _val55, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w6, _val56, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w7, _val57, _sum5);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_sum6 = _mm256_comp_fmadd_ps(_w0, _val60, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w1, _val61, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w2, _val62, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w3, _val63, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w4, _val64, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w5, _val65, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w6, _val66, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w7, _val67, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_w0, _val70, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w1, _val71, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w2, _val72, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w3, _val73, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w4, _val74, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w5, _val75, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w6, _val76, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w7, _val77, _sum7);
tmpptr += 64;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
outptr += 64;
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = loadfp16(kptr);
__m256 _w1 = loadfp16(kptr + 8);
__m256 _w2 = loadfp16(kptr + 16);
__m256 _w3 = loadfp16(kptr + 24);
__m256 _w4 = loadfp16(kptr + 32);
__m256 _w5 = loadfp16(kptr + 40);
__m256 _w6 = loadfp16(kptr + 48);
__m256 _w7 = loadfp16(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_comp_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w7, _val37, _sum3);
tmpptr += 32;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
outptr += 32;
}
for (; i + 1 < size; i += 2)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
__m256 _w0 = loadfp16(kptr);
__m256 _w1 = loadfp16(kptr + 8);
__m256 _w2 = loadfp16(kptr + 16);
__m256 _w3 = loadfp16(kptr + 24);
__m256 _w4 = loadfp16(kptr + 32);
__m256 _w5 = loadfp16(kptr + 40);
__m256 _w6 = loadfp16(kptr + 48);
__m256 _w7 = loadfp16(kptr + 56);
_sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1);
tmpptr += 16;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
outptr += 16;
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
__m256 _sum = _bias0;
const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val0 = _mm256_broadcast_ss(tmpptr);
__m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val4 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val5 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val6 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val7 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _w0 = loadfp16(kptr);
__m256 _w1 = loadfp16(kptr + 8);
__m256 _w2 = loadfp16(kptr + 16);
__m256 _w3 = loadfp16(kptr + 24);
__m256 _w4 = loadfp16(kptr + 32);
__m256 _w5 = loadfp16(kptr + 40);
__m256 _w6 = loadfp16(kptr + 48);
__m256 _w7 = loadfp16(kptr + 56);
_sum = _mm256_comp_fmadd_ps(_w0, _val0, _sum);
_sum = _mm256_comp_fmadd_ps(_w1, _val1, _sum);
_sum = _mm256_comp_fmadd_ps(_w2, _val2, _sum);
_sum = _mm256_comp_fmadd_ps(_w3, _val3, _sum);
_sum = _mm256_comp_fmadd_ps(_w4, _val4, _sum);
_sum = _mm256_comp_fmadd_ps(_w5, _val5, _sum);
_sum = _mm256_comp_fmadd_ps(_w6, _val6, _sum);
_sum = _mm256_comp_fmadd_ps(_w7, _val7, _sum);
tmpptr += 8;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
// 1x1 convolution, stride 2, fp16 weights, pack8 layout (AVX path).
// Strategy: decimate the input by keeping every second pixel in both the
// horizontal and vertical direction, then hand the shrunken blob to the
// stride-1 sgemm kernel which performs the actual matrix product.
static void conv1x1s2_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int w = bottom_blob.w;
    const int channels = bottom_blob.c;
    const size_t elemsize = bottom_blob.elemsize;
    const int elempack = bottom_blob.elempack;
    const int outw = top_blob.w;
    const int outh = top_blob.h;

    // Floats to skip after finishing one output row: the unread tail of the
    // current input row plus the entire next (stride-skipped) input row,
    // times 8 for the pack8 element layout.
    const int tailstep = (w - 2 * outw + w) * 8;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const float* sptr = bottom_blob.channel(p);
        float* dptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Copy one pack8 pixel (8 floats), then advance the source
                // by two pixels to realize the horizontal stride of 2.
                _mm256_storeu_ps(dptr, _mm256_loadu_ps(sptr));
                sptr += 16;
                dptr += 8;
            }
            sptr += tailstep;
        }
    }

    // Dense stride-1 1x1 path on the decimated input.
    conv1x1s1_sgemm_fp16_pack8_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
convolution_sgemm.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
#if __riscv_vector
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
#endif
// Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
// permute
Mat tmp;
#if __riscv_vector
if (size >= packn)
tmp.create(packn * maxk, inch, size / packn + size % packn, 4u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 4u, 1, opt.workspace_allocator);
{
int nn_size = size / packn;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * packn;
float* tmpptr = tmp.channel(i / packn);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
vse32_v_f32m1(tmpptr, vle32_v_f32m1(img0, vl), vl);
img0 += size;
tmpptr += packn;
}
}
}
int remain_size_start = nn_size * packn;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
float* tmpptr = tmp.channel(i / packn + i % packn);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
img0 += size;
tmpptr += 1;
}
}
}
}
#else // __riscv_vector
tmp.create(maxk, inch, size, 4u, 1, opt.workspace_allocator);
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < size; i++)
{
float* tmpptr = tmp.channel(i);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
img0 += size;
tmpptr += 1;
}
}
}
}
#endif // __riscv_vector
#if __riscv_vector
int nn_outch = outch >> 3;
int remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
float* outptr4 = top_blob.channel(p + 4);
float* outptr5 = top_blob.channel(p + 5);
float* outptr6 = top_blob.channel(p + 6);
float* outptr7 = top_blob.channel(p + 7);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + (packn - 1) < size; i += packn)
{
const float* tmpptr = tmp.channel(i / packn);
const float* kptr = kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(biasptr[0], vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(biasptr[1], vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(biasptr[2], vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(biasptr[3], vl);
vfloat32m1_t _sum4 = vfmv_v_f_f32m1(biasptr[4], vl);
vfloat32m1_t _sum5 = vfmv_v_f_f32m1(biasptr[5], vl);
vfloat32m1_t _sum6 = vfmv_v_f_f32m1(biasptr[6], vl);
vfloat32m1_t _sum7 = vfmv_v_f_f32m1(biasptr[7], vl);
for (int q = 0; q < nn; q++)
{
vfloat32m1_t _val = vle32_v_f32m1(tmpptr, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, kptr[0], _val, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, kptr[1], _val, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, kptr[2], _val, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, kptr[3], _val, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, kptr[4], _val, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, kptr[5], _val, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, kptr[6], _val, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, kptr[7], _val, vl);
tmpptr += packn;
kptr += 8;
}
vse32_v_f32m1(outptr0, _sum0, vl);
vse32_v_f32m1(outptr1, _sum1, vl);
vse32_v_f32m1(outptr2, _sum2, vl);
vse32_v_f32m1(outptr3, _sum3, vl);
vse32_v_f32m1(outptr4, _sum4, vl);
vse32_v_f32m1(outptr5, _sum5, vl);
vse32_v_f32m1(outptr6, _sum6, vl);
vse32_v_f32m1(outptr7, _sum7, vl);
outptr0 += packn;
outptr1 += packn;
outptr2 += packn;
outptr3 += packn;
outptr4 += packn;
outptr5 += packn;
outptr6 += packn;
outptr7 += packn;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / packn + i % packn);
const float* kptr = kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
float sum0 = biasptr[0];
float sum1 = biasptr[1];
float sum2 = biasptr[2];
float sum3 = biasptr[3];
float sum4 = biasptr[4];
float sum5 = biasptr[5];
float sum6 = biasptr[6];
float sum7 = biasptr[7];
for (int q = 0; q < nn; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
sum4 += tmpptr[0] * kptr[4];
sum5 += tmpptr[0] * kptr[5];
sum6 += tmpptr[0] * kptr[6];
sum7 += tmpptr[0] * kptr[7];
tmpptr++;
kptr += 8;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
outptr4[0] = sum4;
outptr5[0] = sum5;
outptr6[0] = sum6;
outptr7[0] = sum7;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + (packn - 1) < size; i += packn)
{
const float* tmpptr = tmp.channel(i / packn);
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
int nn = inch * maxk; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(biasptr[0], vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(biasptr[1], vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(biasptr[2], vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(biasptr[3], vl);
for (int q = 0; q < nn; q++)
{
vfloat32m1_t _val = vle32_v_f32m1(tmpptr, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, kptr[0], _val, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, kptr[1], _val, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, kptr[2], _val, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, kptr[3], _val, vl);
tmpptr += packn;
kptr += 4;
}
vse32_v_f32m1(outptr0, _sum0, vl);
vse32_v_f32m1(outptr1, _sum1, vl);
vse32_v_f32m1(outptr2, _sum2, vl);
vse32_v_f32m1(outptr3, _sum3, vl);
outptr0 += packn;
outptr1 += packn;
outptr2 += packn;
outptr3 += packn;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / packn + i % packn);
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
int nn = inch * maxk; // inch always > 0
float sum0 = biasptr[0];
float sum1 = biasptr[1];
float sum2 = biasptr[2];
float sum3 = biasptr[3];
for (int q = 0; q < nn; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr++;
kptr += 4;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
int i = 0;
for (; i + (packn - 1) < size; i += packn)
{
const float* tmpptr = tmp.channel(i / packn);
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(bias0, vl);
for (int q = 0; q < nn; q++)
{
_sum0 = vfmacc_vf_f32m1(_sum0, kptr[0], vle32_v_f32m1(tmpptr, vl), vl);
tmpptr += packn;
kptr++;
}
vse32_v_f32m1(outptr0, _sum0, vl);
outptr0 += packn;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / packn + i % packn);
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
float sum0 = bias0;
for (int q = 0; q < nn; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
#else // __riscv_vector
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
for (int i = 0; i < size; i++)
{
const float* tmpptr = tmp.channel(i);
const float* kptr = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
float sum0 = bias0;
for (int q = 0; q < nn; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
#endif // __riscv_vector
}
// Repack convolution weights for the im2col + sgemm path.
//
// _kernel    flat weights, layout maxk-inch-outch
// kernel_tm  output: weights interleaved so that 8 (then 4, then 1) output
//            channels are stored contiguously per (inch, maxk) element,
//            matching the 8/4/1 output-channel tiling of the sgemm kernel.
//            A tile's destination channel index is
//            q / 8 + (q % 8) / 4 + q % 4 (8-wide tiles first, then 4-wide,
//            then singles).
// inch, outch          input / output channel counts
// kernel_w, kernel_h   kernel spatial size
static void convolution_im2col_sgemm_transform_kernel_rvv(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 8b-maxk-inch-outch/8b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
#if __riscv_vector
    // one destination channel per 8-wide tile, then per 4-wide tile, then per single
    kernel_tm.create(8 * maxk, inch, outch / 8 + (outch % 8) / 4 + outch % 4);

    int q = 0;
    // pack 8 output channels at a time: g00 holds [k0..k7] interleaved per tap
    for (; q + 7 < outch; q += 8)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);
        const Mat k4 = kernel.channel(q + 4);
        const Mat k5 = kernel.channel(q + 5);
        const Mat k6 = kernel.channel(q + 6);
        const Mat k7 = kernel.channel(q + 7);

        float* g00 = kernel_tm.channel(q / 8);

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);
            const float* k40 = k4.row(p);
            const float* k50 = k5.row(p);
            const float* k60 = k6.row(p);
            const float* k70 = k7.row(p);

            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];
                g00[4] = k40[k];
                g00[5] = k50[k];
                g00[6] = k60[k];
                g00[7] = k70[k];

                g00 += 8;
            }
        }
    }
    // pack the remaining channels 4 at a time
    for (; q + 3 < outch; q += 4)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);

        float* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4);

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);

            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];

                g00 += 4;
            }
        }
    }
    // leftover single channels are copied through unchanged
    for (; q < outch; q++)
    {
        const Mat k0 = kernel.channel(q);

        float* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4 + q % 4);

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);

            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];

                g00 += 1;
            }
        }
    }
#else
    // scalar build: sgemm consumes the original maxk-inch-outch layout
    kernel_tm = kernel;
#endif // __riscv_vector
}
// Convolution via im2col + sgemm.
//
// Expands bottom_blob into a (size x maxk x inch) workspace where channel q,
// row (u*kernel_w + v) holds the input samples seen by kernel tap (u, v) at
// every output position, then hands the matrix product to im2col_sgemm_rvv.
// kernel must already be packed by
// convolution_im2col_sgemm_transform_kernel_rvv.
static void convolution_im2col_sgemm_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh; // number of output positions

    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);
    {
        // input elements to skip when stepping from the end of one output
        // row to the start of the next (rows of a channel are w floats apart)
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // first input sample read by kernel tap (u, v)
                    const float* sptr = img.row<const float>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];

                            sptr += stride_w;
                            ptr += 1;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
nested_serial.c | // RUN: %libomp-compile && env OMP_DISPLAY_AFFINITY=true %libomp-run | %python %S/check.py -c 'CHECK' %s
// REQUIRES: !abt
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc, char** argv) {
    // Affinity format fields: tl = nesting level, at = affinity thread num,
    // tn = team-local thread num, nt = team size.
    omp_set_affinity_format("TESTER: tl:%L at:%a tn:%n nt:%N");
    // Enable nested parallelism so the inner regions start their own
    // (single-thread) teams at deeper nesting levels.
    omp_set_nested(1);
    #pragma omp parallel num_threads(1)
    {
        // NOTE(review): the CHECK lines below expect tl:1,2,3,2,1 — i.e. an
        // affinity line appears only when the nesting level changes, not for
        // every region; confirm against the runtime's display-affinity rules.
        #pragma omp parallel num_threads(1)
        { }
        #pragma omp parallel num_threads(1)
        { }
        #pragma omp parallel num_threads(1)
        {
            // third nesting level
            #pragma omp parallel num_threads(1)
            { }
        }
        #pragma omp parallel num_threads(1)
        { }
    }
    #pragma omp parallel num_threads(1)
    { }
    #pragma omp parallel num_threads(1)
    { }
    return 0;
}
// CHECK: num_threads=1 TESTER: tl:1 at:0 tn:0 nt:1
// CHECK: num_threads=1 TESTER: tl:2 at:0 tn:0 nt:1
// CHECK: num_threads=1 TESTER: tl:3 at:0 tn:0 nt:1
// CHECK: num_threads=1 TESTER: tl:2 at:0 tn:0 nt:1
// CHECK: num_threads=1 TESTER: tl:1 at:0 tn:0 nt:1
|
pp_collision.c | /* Copyright (C) 2017 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include "pp_collision.h"
#include <stdio.h>
#include <stdlib.h>
#include "imag_self_energy_with_g.h"
#include "interaction.h"
#include "lapack_wrapper.h"
#include "phonoc_array.h"
#include "phonoc_utils.h"
#include "triplet.h"
#include "triplet_iw.h"
static void get_collision(
double *ise, const long num_band0, const long num_band,
const long num_temps, const double *temperatures, const double *g,
const char *g_zero, const double *frequencies,
const lapack_complex_double *eigenvectors, const long triplet[3],
const long triplet_weight, const ConstBZGrid *bzgrid, const double *fc3,
const long is_compact_fc3, const double (*svecs)[3],
const long multi_dims[2], const long (*multiplicity)[2],
const double *masses, const long *p2s_map, const long *s2p_map,
const long *band_indices, const long symmetrize_fc3_q,
const double cutoff_frequency, const long openmp_per_triplets);
static void finalize_ise(double *imag_self_energy, const double *ise,
const long (*bz_grid_address)[3],
const long (*triplets)[3], const long num_triplets,
const long num_temps, const long num_band0,
const long is_NU);
/* Phonon-phonon collision (imaginary self-energy) with tetrahedron-method
 * integration weights.
 *
 * imag_self_energy       output; length num_temps * num_band0, or twice that
 *                        when is_NU (normal and Umklapp parts separated).
 * relative_grid_address  24 tetrahedra x 4 vertices x 3 (thm).
 * triplets/num_triplets  q-point triplets and their multiplicities.
 * band_indices           bands at the first grid point to accumulate over.
 * is_NU                  when nonzero, split the sum into N and U processes.
 *
 * Work per triplet (weights g, mask g_zero) is allocated and freed inside
 * the loop so the OpenMP private(g, g_zero) clause is safe. */
void ppc_get_pp_collision(
    double *imag_self_energy,
    const long relative_grid_address[24][4][3], /* thm */
    const double *frequencies, const lapack_complex_double *eigenvectors,
    const long (*triplets)[3], const long num_triplets,
    const long *triplet_weights, const ConstBZGrid *bzgrid, const double *fc3,
    const long is_compact_fc3, const double (*svecs)[3],
    const long multi_dims[2], const long (*multiplicity)[2],
    const double *masses, const long *p2s_map, const long *s2p_map,
    const Larray *band_indices, const Darray *temperatures, const long is_NU,
    const long symmetrize_fc3_q, const double cutoff_frequency) {
    long i;
    long num_band, num_band0, num_band_prod, num_temps;
    long openmp_per_triplets;
    double *ise, *freqs_at_gp, *g;
    char *g_zero;
    long tp_relative_grid_address[2][24][4][3];

    ise = NULL;
    freqs_at_gp = NULL;
    g = NULL;
    g_zero = NULL;

    num_band0 = band_indices->dims[0];
    num_band = multi_dims[1] * 3;
    num_band_prod = num_band0 * num_band * num_band;
    num_temps = temperatures->dims[0];
    /* NOTE(review): malloc results are not checked before use. */
    ise =
        (double *)malloc(sizeof(double) * num_triplets * num_temps * num_band0);
    freqs_at_gp = (double *)malloc(sizeof(double) * num_band0);
    /* frequencies of the selected bands at the first grid point */
    for (i = 0; i < num_band0; i++) {
        freqs_at_gp[i] =
            frequencies[triplets[0][0] * num_band + band_indices->data[i]];
    }
    /* heuristic: parallelize over triplets when there are many of them,
     * otherwise let the inner kernels use the threads */
    if (num_triplets > num_band) {
        openmp_per_triplets = 1;
    } else {
        openmp_per_triplets = 0;
    }

    tpl_set_relative_grid_address(tp_relative_grid_address,
                                  relative_grid_address, 2);

#ifdef PHPYOPENMP
#pragma omp parallel for schedule(guided) private( \
    g, g_zero) if (openmp_per_triplets)
#endif
    for (i = 0; i < num_triplets; i++) {
        /* per-triplet scratch: g holds two weight sets (hence the factor 2) */
        g = (double *)malloc(sizeof(double) * 2 * num_band_prod);
        g_zero = (char *)malloc(sizeof(char) * num_band_prod);
        tpi_get_integration_weight(g, g_zero, freqs_at_gp, /* used as f0 */
                                   num_band0, tp_relative_grid_address,
                                   triplets[i], 1, bzgrid,
                                   frequencies, /* used as f1 */
                                   num_band, frequencies, /* used as f2 */
                                   num_band, 2, 1 - openmp_per_triplets);

        get_collision(ise + i * num_temps * num_band0, num_band0, num_band,
                      num_temps, temperatures->data, g, g_zero, frequencies,
                      eigenvectors, triplets[i], triplet_weights[i], bzgrid,
                      fc3, is_compact_fc3, svecs, multi_dims, multiplicity,
                      masses, p2s_map, s2p_map, band_indices->data,
                      symmetrize_fc3_q, cutoff_frequency, openmp_per_triplets);

        free(g_zero);
        g_zero = NULL;
        free(g);
        g = NULL;
    }

    /* reduce per-triplet contributions into imag_self_energy */
    finalize_ise(imag_self_energy, ise, bzgrid->addresses, triplets,
                 num_triplets, num_temps, num_band0, is_NU);

    free(freqs_at_gp);
    freqs_at_gp = NULL;
    free(ise);
    ise = NULL;
}
/* Same as ppc_get_pp_collision but with Gaussian smearing of width sigma
 * instead of tetrahedron-method integration; weights beyond
 * sigma * sigma_cutoff are treated as zero (cutoff <= 0 presumably disables
 * the cutoff — confirm in tpi_get_integration_weight_with_sigma). */
void ppc_get_pp_collision_with_sigma(
    double *imag_self_energy, const double sigma, const double sigma_cutoff,
    const double *frequencies, const lapack_complex_double *eigenvectors,
    const long (*triplets)[3], const long num_triplets,
    const long *triplet_weights, const ConstBZGrid *bzgrid, const double *fc3,
    const long is_compact_fc3, const double (*svecs)[3],
    const long multi_dims[2], const long (*multiplicity)[2],
    const double *masses, const long *p2s_map, const long *s2p_map,
    const Larray *band_indices, const Darray *temperatures, const long is_NU,
    const long symmetrize_fc3_q, const double cutoff_frequency) {
    long i;
    long num_band, num_band0, num_band_prod, num_temps;
    long openmp_per_triplets, const_adrs_shift;
    double cutoff;
    double *ise, *freqs_at_gp, *g;
    char *g_zero;

    ise = NULL;
    freqs_at_gp = NULL;
    g = NULL;
    g_zero = NULL;

    num_band0 = band_indices->dims[0];
    num_band = multi_dims[1] * 3;
    num_band_prod = num_band0 * num_band * num_band;
    num_temps = temperatures->dims[0];
    /* offset of the second weight set inside g */
    const_adrs_shift = num_band_prod;

    /* NOTE(review): malloc results are not checked before use. */
    ise =
        (double *)malloc(sizeof(double) * num_triplets * num_temps * num_band0);
    freqs_at_gp = (double *)malloc(sizeof(double) * num_band0);
    /* frequencies of the selected bands at the first grid point */
    for (i = 0; i < num_band0; i++) {
        freqs_at_gp[i] =
            frequencies[triplets[0][0] * num_band + band_indices->data[i]];
    }
    /* heuristic: parallelize over triplets when there are many of them */
    if (num_triplets > num_band) {
        openmp_per_triplets = 1;
    } else {
        openmp_per_triplets = 0;
    }

    cutoff = sigma * sigma_cutoff;

#ifdef PHPYOPENMP
#pragma omp parallel for schedule(guided) private( \
    g, g_zero) if (openmp_per_triplets)
#endif
    for (i = 0; i < num_triplets; i++) {
        /* per-triplet scratch: g holds two weight sets (hence the factor 2) */
        g = (double *)malloc(sizeof(double) * 2 * num_band_prod);
        g_zero = (char *)malloc(sizeof(char) * num_band_prod);
        tpi_get_integration_weight_with_sigma(
            g, g_zero, sigma, cutoff, freqs_at_gp, num_band0, triplets[i],
            const_adrs_shift, frequencies, num_band, 2, 0);

        get_collision(ise + i * num_temps * num_band0, num_band0, num_band,
                      num_temps, temperatures->data, g, g_zero, frequencies,
                      eigenvectors, triplets[i], triplet_weights[i], bzgrid,
                      fc3, is_compact_fc3, svecs, multi_dims, multiplicity,
                      masses, p2s_map, s2p_map, band_indices->data,
                      symmetrize_fc3_q, cutoff_frequency, openmp_per_triplets);

        free(g_zero);
        g_zero = NULL;
        free(g);
        g = NULL;
    }

    /* reduce per-triplet contributions into imag_self_energy */
    finalize_ise(imag_self_energy, ise, bzgrid->addresses, triplets,
                 num_triplets, num_temps, num_band0, is_NU);

    free(freqs_at_gp);
    freqs_at_gp = NULL;
    free(ise);
    ise = NULL;
}
/* Imaginary self-energy contribution of one triplet, for all temperatures
 * and requested bands.
 *
 * ise   output, length num_temps * num_band0 (written, not accumulated).
 * g     integration weights, two sets of num_band_prod values back to back.
 * g_zero  mask of weight positions known to be zero; skipped via g_pos.
 * openmp_per_triplets  when 1, the caller already runs triplets in
 *     parallel, so the inner kernels are called single-threaded
 *     (the `1 - openmp_per_triplets` arguments). */
static void get_collision(
    double *ise, const long num_band0, const long num_band,
    const long num_temps, const double *temperatures, const double *g,
    const char *g_zero, const double *frequencies,
    const lapack_complex_double *eigenvectors, const long triplet[3],
    const long triplet_weight, const ConstBZGrid *bzgrid, const double *fc3,
    const long is_compact_fc3, const double (*svecs)[3],
    const long multi_dims[2], const long (*multiplicity)[2],
    const double *masses, const long *p2s_map, const long *s2p_map,
    const long *band_indices, const long symmetrize_fc3_q,
    const double cutoff_frequency, const long openmp_per_triplets) {
    long i;
    long num_band_prod, num_g_pos;
    double *fc3_normal_squared;
    long(*g_pos)[4];

    fc3_normal_squared = NULL;
    g_pos = NULL;

    num_band_prod = num_band0 * num_band * num_band;
    /* NOTE(review): malloc results are not checked before use. */
    fc3_normal_squared = (double *)malloc(sizeof(double) * num_band_prod);
    g_pos = (long(*)[4])malloc(sizeof(long[4]) * num_band_prod);

    for (i = 0; i < num_band_prod; i++) {
        fc3_normal_squared[i] = 0;
    }

    /* band-index tuples with non-zero integration weight */
    num_g_pos = ise_set_g_pos(g_pos, num_band0, num_band, g_zero);

    /* |Phi3|^2 at this triplet, computed only at the non-zero positions */
    itr_get_interaction_at_triplet(
        fc3_normal_squared, num_band0, num_band, g_pos, num_g_pos, frequencies,
        eigenvectors, triplet, bzgrid, fc3, is_compact_fc3, svecs, multi_dims,
        multiplicity, masses, p2s_map, s2p_map, band_indices, symmetrize_fc3_q,
        cutoff_frequency, 0, 0, 1 - openmp_per_triplets);

    ise_imag_self_energy_at_triplet(
        ise, num_band0, num_band, fc3_normal_squared, frequencies, triplet,
        triplet_weight, g, g + num_band_prod, g_pos, num_g_pos, temperatures,
        num_temps, cutoff_frequency, 1 - openmp_per_triplets, 0);

    free(fc3_normal_squared);
    fc3_normal_squared = NULL;
    free(g_pos);
    g_pos = NULL;
}
/* Reduce per-triplet imaginary self-energies into the caller's output.
 *
 * ise holds num_triplets consecutive blocks of num_temps * num_band0
 * values.  When is_NU is zero they are all summed into
 * imag_self_energy[0 .. block).  When is_NU is nonzero the output is twice
 * as long: normal-process triplets (tpl_is_N) go into the first block,
 * Umklapp triplets into the second. */
static void finalize_ise(double *imag_self_energy, const double *ise,
                         const long (*bz_grid_addresses)[3],
                         const long (*triplets)[3], const long num_triplets,
                         const long num_temps, const long num_band0,
                         const long is_NU) {
    long t, j;
    const long block = num_temps * num_band0;
    const long out_len = is_NU ? 2 * block : block;

    for (j = 0; j < out_len; j++) {
        imag_self_energy[j] = 0;
    }

    for (t = 0; t < num_triplets; t++) {
        const double *src = ise + t * block;
        double *dst = imag_self_energy;
        /* Umklapp contributions land in the second half of the output. */
        if (is_NU && !tpl_is_N(triplets[t], bz_grid_addresses)) {
            dst += block;
        }
        for (j = 0; j < block; j++) {
            dst[j] += src[j];
        }
    }
}
|
sequentialBinarySearch.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
int binarySearch(int left, int right, int t, int token, int *a);
/* Build a sorted vector of even numbers, read a value from the user, and
 * locate it with the (OpenMP-partitioned) binary search below. */
int main()
{
    int i, token, n, t = 2, result, *a;

    printf("Enter the size of the vector a: ");
    if (scanf("%d", &n) != 1 || n <= 0)
    {
        fprintf(stderr, "Invalid vector size.\n");
        return 1;
    }

    a = malloc(n * sizeof(int));
    if (a == NULL)
    {
        fprintf(stderr, "Memory allocation failed.\n");
        return 1;
    }

    /* a[i] = 2*i: strictly increasing, as binary search requires */
    for (i = 0; i < n; i++)
    {
        a[i] = 2 * i;
        printf("[%d]\t", a[i]);
    }

    printf("\nEnter the number to search: ");
    if (scanf("%d", &token) != 1)
    {
        fprintf(stderr, "Invalid number.\n");
        free(a);
        return 1;
    }

    /*
    We pass the following values to the function:
    - the far left index (0)
    - the far right index (n-1)
    - the number of threads
    - the token to search for
    - vector A
    */
    result = binarySearch(0, n - 1, t, token, a);

    if (result < 0)
        printf("\nThe number is not in the vector.");
    else
        printf("\nThe number is in the position: %d\n", result + 1);

    free(a);
    return 0;
}
/* Parallel binary search: partition [left, right] into one contiguous slice
 * per thread and binary-search each slice independently.
 *
 * Returns the index of token in a, or -1 if absent (a must be sorted
 * ascending).
 *
 * Fixes over the previous version:
 *  - the slice size was hard-coded to (n / 2) regardless of t, so with
 *    t > 2 threads read past the end of the array, and with odd n the last
 *    element was never examined;
 *  - concurrent writes to the shared result were unsynchronized;
 *  - the midpoint is computed overflow-safely. */
int binarySearch(int left, int right, int t, int token, int *a)
{
    int index = -1;
    int n = right - left + 1; /* number of elements to search */

    if (n <= 0 || t < 1)
        return -1;

    omp_set_num_threads(t);
    omp_set_nested(1);

#pragma omp parallel shared(a, token, left, n, index)
    {
        int id = omp_get_thread_num();
        int nt = omp_get_num_threads(); /* actual team size (may be < t) */

        /* Split n elements over nt threads: the first n % nt threads take
           one extra element so every index in [left, right] is covered. */
        int base = n / nt;
        int extra = n % nt;
        int lo = left + id * base + (id < extra ? id : extra);
        int hi = lo + base + (id < extra ? 1 : 0) - 1;

        while (lo <= hi)
        {
            int mid = lo + (hi - lo) / 2; /* overflow-safe midpoint */
            if (a[mid] == token)
            {
                /* Serialize the write; at most one thread's slice can
                   contain the token since a is sorted. */
#pragma omp critical
                index = mid;
                break;
            }
            else if (token < a[mid])
            {
                hi = mid - 1;
            }
            else
            {
                lo = mid + 1;
            }
        }
    } /* implicit barrier: index is final past this point */
    return index;
}
|
sol1.c | /**
* \file
* \brief [Problem 26](https://projecteuler.net/problem=26) solution
* \author [Krishna Vedala](https://github.com/kvedala)
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define MAX_DENO 2000 /**< limit of unit fractions */
#define MAX_LEN \
(MAX_DENO + 10) /**< length of resulting recurring fraction number */
/**
 * Comparison callback for `qsort`/`bsearch` over `unsigned short` values.
 * Returns the (promoted-to-int) difference, i.e. negative, zero, or
 * positive as *a is less than, equal to, or greater than *b.
 */
int compare(const void *a, const void *b)
{
    const unsigned short lhs = *(const unsigned short *)a;
    const unsigned short rhs = *(const unsigned short *)b;
    /* same value as the original direct subtraction: both operands promote
       to int, so the difference cannot overflow */
    return (int)lhs - (int)rhs;
}
/**
 * Main function: find the denominator < MAX_DENO whose unit fraction
 * 1/deno has the longest recurring cycle in its decimal expansion.
 *
 * Fixes over the previous version:
 *  - `#pragma omp for` outside any parallel region is a no-op; it is now
 *    `#pragma omp parallel for` so the loop actually runs in parallel;
 *  - the cycle was detected by `bsearch` over an UNSORTED list of
 *    remainders, which has unspecified results; remainders are now looked
 *    up in a direct remainder->iteration table, which is both correct and
 *    O(1) per digit.
 */
int main(int argc, char *argv[])
{
    unsigned short max_digits = 0, max_idx_number = 0;
    clock_t start_time = clock();

    short deno;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (deno = 2; deno < MAX_DENO; deno++)
    {
        /* first_seen[r]: long-division step at which remainder r first
         * appeared, or -1 if not seen yet.  Remainders satisfy
         * 0 <= r < deno < MAX_DENO, so the table is large enough. */
        short first_seen[MAX_DENO];
        memset(first_seen, -1, sizeof(first_seen));

        unsigned short rem = 1, index = 0, num_digits = 0;

        /* Long division of 1/deno: iterate remainders until the expansion
         * terminates (rem == 0) or a remainder repeats (cycle found). */
        while (rem != 0 && first_seen[rem] < 0)
        {
            first_seen[rem] = (short)index;
            index++;
            rem = (rem * 10) % deno;
        }
        if (rem != 0) /* recurring: length = distance back to first sight */
            num_digits = index - (unsigned short)first_seen[rem];

#ifdef _OPENMP
#pragma omp critical
        {
#endif
            if (num_digits > max_digits)
            {
                max_digits = num_digits;
                max_idx_number = deno;
            }
#ifdef _OPENMP
        }
#endif
    }

    clock_t end_time = clock();
    printf("Time taken: %.4g ms\n",
           1e3 * (double)(end_time - start_time) / CLOCKS_PER_SEC);
    printf("Maximum digits: %hu\t Denominator: %hu\n", max_digits,
           max_idx_number);

    return 0;
}
|
GB_dense_ewise3_noaccum_template.c | //------------------------------------------------------------------------------
// GB_dense_ewise3_noaccum_template: C = A+B where all 3 matrices are dense
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB_unused.h"
// Template body: expects GB_ATYPE/GB_BTYPE/GB_CTYPE, GB_GETA/GB_GETB,
// GB_BINOP, GB_CX, GB_CTYPE_IS_ATYPE/GB_CTYPE_IS_BTYPE to be #define'd by
// the including kernel, and `nthreads` to be in scope.
{

    //--------------------------------------------------------------------------
    // get A, B, and C
    //--------------------------------------------------------------------------

    // any matrix may be aliased to any other (C==A, C==B, and/or A==B)
    GB_ATYPE *Ax = (GB_ATYPE *) A->x ;
    GB_BTYPE *Bx = (GB_BTYPE *) B->x ;
    GB_CTYPE *Cx = (GB_CTYPE *) C->x ;
    const int64_t cnz = GB_nnz (C) ;

    ASSERT (GB_as_if_full (A)) ;
    ASSERT (GB_as_if_full (B)) ;
    ASSERT (GB_IS_FULL (C)) ;
    ASSERT (!C->iso) ;
    ASSERT (!A->iso) ;
    ASSERT (!B->iso) ;

    int64_t p ;

    //--------------------------------------------------------------------------
    // C = A+B where all 3 matrices are dense
    //--------------------------------------------------------------------------

    // The aliased cases below avoid re-reading the output operand.

    #if GB_CTYPE_IS_BTYPE

    if (C == B)
    {

        //----------------------------------------------------------------------
        // C = A+C where A and C are dense
        //----------------------------------------------------------------------

        // C and B cannot be aliased if their types differ

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            GB_GETA (aij, Ax, p, false) ;                 // aij = Ax [p]
            GB_BINOP (GB_CX (p), aij, GB_CX (p), 0, 0) ;  // Cx [p] = aij+Cx [p]
        }

    }
    else

    #endif

    #if GB_CTYPE_IS_ATYPE

    if (C == A)
    {

        //----------------------------------------------------------------------
        // C = C+B where B and C are dense
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            GB_GETB (bij, Bx, p, false) ;                 // bij = Bx [p]
            GB_BINOP (GB_CX (p), GB_CX (p), bij, 0, 0) ;  // Cx [p] += bij
        }

    }
    else

    #endif

    {

        //----------------------------------------------------------------------
        // C = A+B where all 3 matrices are dense
        //----------------------------------------------------------------------

        // note that A and B may still be aliased to each other

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            GB_GETA (aij, Ax, p, false) ;        // aij = Ax [p]
            GB_GETB (bij, Bx, p, false) ;        // bij = Bx [p]
            GB_BINOP (GB_CX (p), aij, bij, 0, 0) ;  // Cx [p] = aij + bij
        }
    }
}
|
GB_binop__max_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_uint32)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__max_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__max_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint32)
// A*D function (colscale): GB (_AxD__max_uint32)
// D*A function (rowscale): GB (_DxB__max_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__max_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__max_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint32)
// C=scalar+B GB (_bind1st__max_uint32)
// C=scalar+B' GB (_bind1st_tran__max_uint32)
// C=A+scalar GB (_bind2nd__max_uint32)
// C=A'+scalar GB (_bind2nd_tran__max_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT32 || GxB_NO_MAX_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, B are all dense; the accumulator/op here is
// GB_IMAX over uint32_t.  nthreads selects the OpenMP thread count
// inside the included template.
void GB (_Cdense_ewise3_accum__max_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all 3 matrices are dense (op: GB_IMAX over uint32_t).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE,
// telling the caller to fall back to the generic implementation.
GrB_Info GB (_Cdense_ewise3_noaccum__max_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C using
// GB_IMAX.  B_ek_slicing/B_ntasks/B_nthreads describe the parallel
// slicing of B's entries prepared by the caller.
GrB_Info GB (_Cdense_accumB__max_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork, actual type
// uint32_t) into every entry of the dense matrix C using GB_IMAX.
GrB_Info GB (_Cdense_accumb__max_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns; harmless
    // artifact of the code generator (this file is auto-generated).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C(i,j) = max (A(i,j), D(j,j)): each column j of A is combined with the
// jth diagonal entry of D.  A_ek_slicing partitions A across A_ntasks tasks.
GrB_Info GB (_AxD__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's numeric values, written by the template
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C(i,j) = max (D(i,i), B(i,j)): each row i of B is combined with the
// ith diagonal entry of D.
GrB_Info GB (_DxB__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's numeric values, written by the template
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Set union: C has an entry wherever A or B has one; where both are present,
// C(i,j) = max (A(i,j), B(i,j)).  M is an optional mask (structural if
// Mask_struct, complemented if Mask_comp).  The C_to_* arrays map vectors of
// C to the corresponding vectors of M, A, and B.
GrB_Info GB (_AaddB__max_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspace, allocated by the template as needed
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
// free any workspace declared above
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Set intersection: C has an entry only where both A and B do, with
// C(i,j) = max (A(i,j), B(i,j)).
GrB_Info GB (_AemultB_01__max_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// The template is instantiated twice (flipped / not flipped) so the flipxy
// test happens once, outside the hot loop, not per entry.
GrB_Info GB (_AemultB_02__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (MAX is commutative, so this branch is the one compiled here.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// The sparse mask M drives the iteration; M_ek_slicing partitions M's
// entries across M_ntasks tasks.
GrB_Info GB (_AemultB_03__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// C is held as a bitmap; ewise_method selects the masked/unmasked variant
// inside the template.
GrB_Info GB (_AemultB_bitmap__max_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = max (x, Bx [p]) for every entry present in B.  The untyped
// pointers are views of uint32_t arrays; Bb is B's bitmap (NULL if B is not
// bitmap, in which case GBB treats every position as present).
GrB_Info GB (_bind1st__max_uint32)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
    // typed views of the untyped inputs
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the operator only where B has an entry
        if (GBB (Bb, p))
        {
            uint32_t bij = Bx [p] ;
            Cx [p] = GB_IMAX (x, bij) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = max (Ax [p], y) for every entry present in A.  The untyped
// pointers are views of uint32_t arrays; Ab is A's bitmap (NULL if A is not
// bitmap, in which case GBB treats every position as present).
GrB_Info GB (_bind2nd__max_uint32)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
    // typed views of the untyped inputs
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the operator only where A has an entry
        if (GBB (Ab, p))
        {
            uint32_t aij = Ax [p] ;
            Cx [p] = GB_IMAX (aij, y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
// C = max (x, A'): the scalar x is bound to the first operand; the
// transpose template uses GB_CAST_OP above for each moved entry.
GrB_Info GB (_bind1st_tran__max_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated boilerplate: re-establish GB_ATYPE for subsequent code
// (same definition here, so effectively a no-op)
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
// C = max (A', y): the scalar y is bound to the second operand; the
// transpose template uses GB_CAST_OP above for each moved entry.
GrB_Info GB (_bind2nd_tran__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
clique_cmap.h | #pragma once
#include "cmap.h"
#include "graph.h"
#include "emb_list.h"
// Count triangles (3-cliques): for each vertex v0 and each neighbor v1,
// every common neighbor of v0 and v1 closes one triangle.  The cmaps
// argument is unused here; it is kept so all k-clique kernels share a
// signature.
void cmap_3clique(Graph &g, uint64_t &total,
                  std::vector<cmap8_t> &cmaps) {
  std::cout << "3-clique using cmap\n";
  uint64_t counter = 0;
  #pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
  for (vidType v0 = 0; v0 < g.V(); v0++) {
    auto adj0 = g.N(v0);
    // each thread accumulates into its private reduction copy of counter
    for (auto v1 : adj0)
      counter += intersection_num(adj0, g.N(v1));
  }
  total = counter;
}
// Count 4-cliques using a per-thread color map (cmap).  The cmap stores,
// per vertex, the deepest prefix level it belongs to: 1 = in N(v0),
// 2 = in N(v0) ∩ N(v1).  Levels are restored on the way back up so the
// map can be reused for the next v1 / v0.
void cmap_4clique(Graph &g, uint64_t &total,
std::vector<cmap8_t> &cmaps) {
std::cout << "4-clique using cmap\n";
uint64_t counter = 0;
#pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0++) {
uint64_t local_counter = 0;
auto y0 = g.N(v0);
#if 0
// reference implementation via explicit set intersections (disabled;
// note it adds into counter, not local_counter — dead code as written)
for (auto v1 : y0) {
auto y1 = g.N(v1);
auto y0y1 = y0 & y1;
for (auto v2 : y0y1)
counter += intersection_num(y0y1, g.N(v2));
}
#else
auto tid = omp_get_thread_num();
auto &cmap = cmaps[tid];
// mark level 1: all neighbors of v0
for (auto u : y0) cmap.set(u, 1);
for (auto v1 : y0) {
auto y1 = g.N(v1);
VertexSet y0y1;
y0y1.clear();
// promote common neighbors of v0 and v1 to level 2
for (auto u : y1) {
if (cmap.get(u) == 1) {
cmap.set(u, 2);
y0y1.add(u);
}
}
// every neighbor of v2 still at level 2 completes a 4-clique
for (auto v2 : y0y1) {
for (auto v3 : g.N(v2)) {
// if (cmap.get(v3) == 2)
// local_counter ++;
local_counter += (cmap.get(v3) == 2);
}
}
// demote level-2 vertices back to level 1 before the next v1
for (auto u : y0y1) cmap.set(u, 1);
}
// clear level-1 marks before the next v0
for (auto u : y0) cmap.set(u, 0);
#endif
counter += local_counter;
}
total = counter;
}
// Count 5-cliques using a per-thread color map (cmap).  Levels record the
// deepest prefix a vertex belongs to: 1 = N(v0), 2 = N(v0)∩N(v1),
// 3 = N(v0)∩N(v1)∩N(v2).  Marks are unwound after each branch.
void cmap_5clique(Graph &g, uint64_t &total,
std::vector<cmap8_t> &cmaps) {
std::cout << "5-clique using cmap\n";
uint64_t counter = 0;
#pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0++) {
auto y0 = g.N(v0);
uint64_t local_counter = 0;
#if 0
// reference implementation via explicit set intersections (disabled)
for (auto v1 : y0) {
auto y1 = g.N(v1);
auto y0y1 = y0 & y1;
for (auto v2 : y0y1) {
auto y2 = g.N(v2);
auto y0y1y2 = y0y1 & y2;
for (auto v3 : y0y1y2)
local_counter += intersection_num(y0y1y2, g.N(v3));
}
}
#else
auto tid = omp_get_thread_num();
auto &cmap = cmaps[tid];
// mark level 1: all neighbors of v0
for (auto u : y0) cmap.set(u, 1);
for (auto v1 : y0) {
auto y1 = g.N(v1);
VertexSet y0y1;
y0y1.clear();
// promote common neighbors of v0 and v1 to level 2
for (auto u : y1) {
if (cmap.get(u) == 1) {
cmap.set(u, 2);
y0y1.add(u);
}
}
for (auto v2 : y0y1) {
VertexSet y0y1y2;
y0y1y2.clear();
// promote common neighbors of v0, v1, v2 to level 3
for (auto u : g.N(v2)) {
if (cmap.get(u) == 2) {
cmap.set(u, 3);
y0y1y2.add(u);
}
}
// every neighbor of v3 still at level 3 completes a 5-clique
for (auto v3 : y0y1y2) {
for (auto v4 : g.N(v3)) {
// if (cmap.get(v4) == 3)
// local_counter ++;
local_counter += (cmap.get(v4) == 3);
}
}
// demote level-3 vertices back to level 2 before the next v2
for (auto u : y0y1y2) cmap.set(u, 2);
}
// demote level-2 vertices back to level 1 before the next v1
for (auto u : y0y1) cmap.set(u, 1);
}
// clear level-1 marks before the next v0
for (auto u : y0) cmap.set(u, 0);
#endif
counter += local_counter;
}
total = counter;
}
// ad-hoc 4-clique
// Count 4-cliques using a per-thread cmap plus a per-thread embedding list
// (EmbList) holding the level-2 candidate set instead of a VertexSet.
// cmap levels: 1 = in N(v0), 2 = in N(v0) ∩ N(v1); marks are unwound after
// each branch so the structures can be reused.
void cmap_4clique(Graph &g, uint64_t &total,
                  std::vector<cmap8_t> &cmaps,
                  std::vector<EmbList> &emb_lists) {
  std::cout << "4-clique using cmap and embedding list\n";
  uint64_t counter = 0;
  #pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
  for (vidType v0 = 0; v0 < g.V(); v0 ++) {
    // accumulate per-vertex hits locally, then fold into the reduction
    // variable once per v0 — matches the other kernels in this file and
    // keeps the innermost loop off the reduction variable.
    uint64_t local_counter = 0;
    auto tid = omp_get_thread_num();
    auto &cmap = cmaps[tid];
    auto &emb_list = emb_lists[tid];
    auto y0 = g.N(v0);
    // mark level 1: all neighbors of v0
    for (auto u : y0) cmap.set(u, 1);
    for (auto v1 : y0) {
      emb_list.set_size(2, 0);
      // promote common neighbors of v0 and v1 to level 2
      for (auto u : g.N(v1)) {
        if (cmap.get(u) == 1) {
          cmap.set(u, 2);
          emb_list.add_emb(2, u);
        }
      }
      // every neighbor of v2 still at level 2 completes a 4-clique
      for (vidType emb_id = 0; emb_id < emb_list.size(2); emb_id++) {
        auto v2 = emb_list.get_vertex(2, emb_id);
        for (auto v3 : g.N(v2)) {
          local_counter += (cmap.get(v3) == 2);
        }
      }
      // demote level-2 vertices back to level 1 before the next v1
      for (vidType emb_id = 0; emb_id < emb_list.size(2); emb_id++) {
        auto v = emb_list.get_vertex(2, emb_id);
        cmap.set(v, 1);
      }
    }
    // clear level-1 marks before the next v0
    for (auto u : y0) cmap.set(u, 0);
    counter += local_counter;
  }
  total = counter;
}
// Count 5-cliques using a per-thread cmap plus a per-thread embedding list
// holding the level-2 and level-3 candidate sets.  cmap levels: 1 = N(v0),
// 2 = N(v0)∩N(v1), 3 = N(v0)∩N(v1)∩N(v2); marks are unwound after each
// branch.
void cmap_5clique(Graph &g, uint64_t &total,
std::vector<cmap8_t> &cmaps,
std::vector<EmbList> &emb_lists) {
std::cout << "5-clique using cmap and embedding list\n";
uint64_t counter = 0;
#pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0 ++) {
uint64_t local_counter = 0;
auto tid = omp_get_thread_num();
auto &cmap = cmaps[tid];
auto &emb_list = emb_lists[tid];
auto y0 = g.N(v0);
// mark level 1: all neighbors of v0
for (auto u : y0) cmap.set(u, 1);
for (auto v1 : y0) {
emb_list.set_size(2, 0);
// promote common neighbors of v0 and v1 to level 2
for (auto u : g.N(v1)) {
if (cmap.get(u) == 1) {
cmap.set(u, 2);
emb_list.add_emb(2, u);
}
}
for (vidType id2 = 0; id2 < emb_list.size(2); id2++) {
auto v2 = emb_list.get_vertex(2, id2);
emb_list.set_size(3, 0);
// promote common neighbors of v0, v1, v2 to level 3
for (auto u : g.N(v2)) {
if (cmap.get(u) == 2) {
cmap.set(u, 3);
emb_list.add_emb(3, u);
}
}
// every neighbor of v3 still at level 3 completes a 5-clique
for (vidType id3 = 0; id3 < emb_list.size(3); id3++) {
auto v3 = emb_list.get_vertex(3, id3);
for (auto v4 : g.N(v3)) {
// if (cmap.get(v4) == 3)
// local_counter ++;
local_counter += (cmap.get(v4) == 3);
}
}
// demote level-3 vertices back to level 2 before the next v2
for (vidType id3 = 0; id3 < emb_list.size(3); id3++) {
auto v = emb_list.get_vertex(3, id3);
cmap.set(v, 2);
}
}
// demote level-2 vertices back to level 1 before the next v1
for (vidType id2 = 0; id2 < emb_list.size(2); id2++) {
auto v = emb_list.get_vertex(2, id2);
cmap.set(v, 1);
}
}
// clear level-1 marks before the next v0
for (auto u : y0) cmap.set(u, 0);
counter += local_counter;
}
total = counter;
}
// Dispatch to the cmap-based k-clique kernel for k = 3, 4, or 5.
// fix: the original fell through to the 5-clique kernel for ANY k other
// than 3 or 4 (e.g. k = 2 or k = 6), silently returning a wrong count;
// unsupported k is now reported explicitly and total is left at 0.
void cmap_kclique(Graph &g, unsigned k, uint64_t &total,
                  std::vector<cmap8_t> &cmaps) {
  switch (k) {
    case 3:
      cmap_3clique(g, total, cmaps);
      break;
    case 4:
      cmap_4clique(g, total, cmaps);
      break;
    case 5:
      cmap_5clique(g, total, cmaps);
      break;
    default:
      std::cerr << "cmap_kclique: unsupported k = " << k
                << " (only k = 3, 4, 5 are implemented)\n";
      total = 0;
      break;
  }
}
|
j2d5pt.c | #define BENCH_DIM 2
#define BENCH_FPP 10
#define BENCH_RAD 1
#include "common.h"
// 2-D 5-point Jacobi stencil over a (dimsize x dimsize) grid with a halo of
// BENCH_RAD cells, double-buffered across time steps via the t%2 / (t+1)%2
// planes of A.  When scop is true the loop nest is wrapped in
// #pragma scop / #pragma endscop, the region a polyhedral compiler (e.g.
// PLUTO/PPCG) extracts and replaces; otherwise a plain OpenMP version runs.
// Returns elapsed wall time in seconds.
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
// reinterpret the flat buffer as a [2][dimsize][dimsize] stack of planes
// (the leading time dimension is implicit in the A[(t+1)%2] indexing)
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
#pragma scop
for (int t = 0; t < timestep; t++)
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] = (5.1f * A[t%2][i-1][j]
+ 12.1f * A[t%2][i][j-1] + 15.0f * A[t%2][i][j]
+ 12.2f * A[t%2][i][j+1] + 5.2f * A[t%2][i+1][j]) / 118;
#pragma endscop
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] = (5.1f * A[t%2][i-1][j]
+ 12.1f * A[t%2][i][j-1] + 15.0f * A[t%2][i][j]
+ 12.2f * A[t%2][i][j+1] + 5.2f * A[t%2][i+1][j]) / 118;
}
// end_time is never assigned here — presumably instrumentation inserted in
// place of the scop region sets it; as written, wall time is measured now.
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987-2018 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-family/c-common.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
declarations whose type would be completed by completing that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* Record whether a type or decl was written with nonconstant size.
Note that TYPE_SIZE may have simplified to a constant. */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a type is defined inside a struct or union type.
This is used for -Wc++-compat. */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
/* Record whether an "incomplete type" error was given for the type. */
#define C_TYPE_ERROR_REPORTED(TYPE) TYPE_LANG_FLAG_3 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
return type. */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For a PARM_DECL, nonzero if it was declared as an array. */
#define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
been declared. */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
built-in prototype and does not have a non-built-in prototype. */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register. This is strictly a
front-end flag, whereas DECL_REGISTER is used for code generation;
they may differ for structures with volatile fields. */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
unevaluated operand of sizeof / typeof / alignof. This is only
used for functions declared static but not defined, though outside
sizeof and typeof it is set for other function decls as well. */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
#pragma omp threadprivate. */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
N.B. Could be simplified if all built-in decls had complete prototypes
(but this is presently difficult because some of them need FILE*). */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (!prototype_p (TREE_TYPE (EXP)) \
&& !DECL_BUILT_IN (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
without prototypes. */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For a CONSTRUCTOR, whether some initializer contains a
subexpression meaning it is not a constant expression. */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
/* For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already
been folded. */
#define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1 (SAVE_EXPR_CHECK (EXP))
/* Record parser information about an expression that is irrelevant
for code generation alongside a tree representing its value. */
struct c_expr
{
/* The value of the expression. */
tree value;
/* Record the original unary/binary operator of an expression, which may
have been changed by fold, STRING_CST for unparenthesized string
constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
(even if parenthesized), for subexpressions, and for non-constant
initializers, or ERROR_MARK for other expressions (including
parenthesized expressions). */
enum tree_code original_code;
/* If not NULL, the original type of an expression. This will
differ from the type of the value field for an enum constant.
The type of an enum constant is a plain integer type, but this
field will be the enum type. */
tree original_type;
/* The source range of this expression. This is redundant
for node values that have locations, but not all node kinds
have locations (e.g. constants, and references to params, locals,
etc), so we stash a copy here. */
source_range src_range;
/* Access to the first and last locations within the source spelling
of this expression. */
location_t get_start () const { return src_range.m_start; }
location_t get_finish () const { return src_range.m_finish; }
/* Prefer the location carried by the value itself; otherwise
synthesize one (caret at the start) from the stashed range. */
location_t get_location () const
{
if (EXPR_HAS_LOCATION (value))
return EXPR_LOCATION (value);
else
return make_location (get_start (), get_start (), get_finish ());
}
/* Set the value to error_mark_node whilst ensuring that src_range
is initialized. */
void set_error ()
{
value = error_mark_node;
src_range.m_start = UNKNOWN_LOCATION;
src_range.m_finish = UNKNOWN_LOCATION;
}
};
/* Type alias for struct c_expr. This allows to use the structure
inside the VEC types. */
typedef struct c_expr c_expr_t;
/* A kind of type specifier. Note that this information is currently
only used to distinguish tag definitions, tag references and typeof
uses. */
enum c_typespec_kind {
/* No typespec. This appears only in struct c_declspec. */
ctsk_none,
/* A reserved keyword type specifier. */
ctsk_resword,
/* A reference to a tag, previously declared, such as "struct foo".
This includes where the previous declaration was as a different
kind of tag, in which case this is only valid if shadowing that
tag in an inner scope. */
ctsk_tagref,
/* A reference to a tag, not previously declared in a visible
scope. */
ctsk_tagfirstref,
/* A definition of a tag such as "struct foo { int a; }". */
ctsk_tagdef,
/* A typedef name. */
ctsk_typedef,
/* An ObjC-specific kind of type specifier. */
ctsk_objc,
/* A typeof specifier, or _Atomic ( type-name ). */
ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
passed to declspecs_add_type only. */
struct c_typespec {
/* What kind of type specifier this is. */
enum c_typespec_kind kind;
/* Whether the expression has operands suitable for use in constant
expressions. */
bool expr_const_operands;
/* The specifier itself. */
tree spec;
/* An expression to be evaluated before the type specifier, in the
case of typeof specifiers, or NULL otherwise or if no such
expression is required for a particular typeof specifier. In
particular, when typeof is applied to an expression of variably
modified type, that expression must be evaluated in order to
determine array sizes that form part of the type, but the
expression itself (as opposed to the array sizes) forms no part
of the type and so needs to be recorded separately. */
tree expr;
};
/* A storage class specifier. */
enum c_storage_class {
csc_none,
csc_auto,
csc_extern,
csc_register,
csc_static,
csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
"double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
or none of these. */
enum c_typespec_keyword {
cts_none,
cts_void,
cts_bool,
cts_char,
cts_int,
cts_float,
cts_int_n,
cts_double,
cts_dfloat32,
cts_dfloat64,
cts_dfloat128,
cts_floatn_nx,
cts_fract,
cts_accum,
cts_auto_type
};
/* This enum lists all the possible declarator specifiers, storage
class or attribute that a user can write. There is at least one
enumerator per possible declarator specifier in the struct
c_declspecs below.
It is used to index the array of declspec locations in struct
c_declspecs. */
enum c_declspec_word {
cdw_typespec /* A catch-all for a typespec. */,
cdw_storage_class /* A catch-all for a storage class */,
cdw_attributes,
cdw_typedef,
cdw_explicit_signed,
cdw_deprecated,
cdw_default_int,
cdw_long,
cdw_long_long,
cdw_short,
cdw_signed,
cdw_unsigned,
cdw_complex,
cdw_inline,
cdw_noreturn,
cdw_thread,
cdw_const,
cdw_volatile,
cdw_restrict,
cdw_atomic,
cdw_saturating,
cdw_alignas,
cdw_address_space,
cdw_gimple,
cdw_rtl,
cdw_number_of_elements /* This one must always be the last
enumerator. */
};
/* A sequence of declaration specifiers in C. When a new declaration
specifier is added, please update the enum c_declspec_word above
accordingly. */
struct c_declspecs {
source_location locations[cdw_number_of_elements];
/* The type specified, if a single type specifier such as a struct,
union or enum specifier, typedef name or typeof specifies the
whole type, or NULL_TREE if none or a keyword such as "void" or
"char" is used. Does not include qualifiers. */
tree type;
/* Any expression to be evaluated before the type, from a typeof
specifier. */
tree expr;
/* The attributes from a typedef decl. */
tree decl_attr;
/* When parsing, the attributes. Outside the parser, this will be
NULL; attributes (possibly from multiple lists) will be passed
separately. */
tree attrs;
/* The pass to start compiling a __GIMPLE or __RTL function with. */
char *gimple_or_rtl_pass;
/* The base-2 log of the greatest alignment required by an _Alignas
specifier, in bytes, or -1 if no such specifiers with nonzero
alignment. */
int align_log;
/* For the __intN declspec, this stores the index into the int_n_* arrays. */
int int_n_idx;
/* For the _FloatN and _FloatNx declspec, this stores the index into
the floatn_nx_types array. */
int floatn_nx_idx;
/* The storage class specifier, or csc_none if none. */
enum c_storage_class storage_class;
/* Any type specifier keyword used such as "int", not reflecting
modifiers such as "short", or cts_none if none. */
ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8;
/* The kind of type specifier if one has been seen, ctsk_none
otherwise. */
ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3;
/* Whether any expressions in typeof specifiers may appear in
constant expressions. */
BOOL_BITFIELD expr_const_operands : 1;
/* Whether any declaration specifiers have been seen at all. */
BOOL_BITFIELD declspecs_seen_p : 1;
/* Whether something other than a storage class specifier or
attribute has been seen. This is used to warn for the
obsolescent usage of storage class specifiers other than at the
start of the list. (Doing this properly would require function
specifiers to be handled separately from storage class
specifiers.) */
BOOL_BITFIELD non_sc_seen_p : 1;
/* Whether the type is specified by a typedef or typeof name. */
BOOL_BITFIELD typedef_p : 1;
/* Whether the type is explicitly "signed" or specified by a typedef
whose type is explicitly "signed". */
BOOL_BITFIELD explicit_signed_p : 1;
/* Whether the specifiers include a deprecated typedef. */
BOOL_BITFIELD deprecated_p : 1;
/* Whether the type defaulted to "int" because there were no type
specifiers. */
BOOL_BITFIELD default_int_p : 1;
/* Whether "long" was specified. */
BOOL_BITFIELD long_p : 1;
/* Whether "long" was specified more than once. */
BOOL_BITFIELD long_long_p : 1;
/* Whether "short" was specified. */
BOOL_BITFIELD short_p : 1;
/* Whether "signed" was specified. */
BOOL_BITFIELD signed_p : 1;
/* Whether "unsigned" was specified. */
BOOL_BITFIELD unsigned_p : 1;
/* Whether "complex" was specified. */
BOOL_BITFIELD complex_p : 1;
/* Whether "inline" was specified. */
BOOL_BITFIELD inline_p : 1;
/* Whether "_Noreturn" was specified. */
BOOL_BITFIELD noreturn_p : 1;
/* Whether "__thread" or "_Thread_local" was specified. */
BOOL_BITFIELD thread_p : 1;
/* Whether "__thread" rather than "_Thread_local" was specified. */
BOOL_BITFIELD thread_gnu_p : 1;
/* Whether "const" was specified. */
BOOL_BITFIELD const_p : 1;
/* Whether "volatile" was specified. */
BOOL_BITFIELD volatile_p : 1;
/* Whether "restrict" was specified. */
BOOL_BITFIELD restrict_p : 1;
/* Whether "_Atomic" was specified. */
BOOL_BITFIELD atomic_p : 1;
/* Whether "_Sat" was specified. */
BOOL_BITFIELD saturating_p : 1;
/* Whether any alignment specifier (even with zero alignment) was
specified. */
BOOL_BITFIELD alignas_p : 1;
/* Whether any __GIMPLE specifier was specified. */
BOOL_BITFIELD gimple_p : 1;
/* Whether any __RTL specifier was specified. */
BOOL_BITFIELD rtl_p : 1;
/* The address space that the declaration belongs to. */
addr_space_t address_space;
};
/* The various kinds of declarators in C. */
enum c_declarator_kind {
/* An identifier. */
cdk_id,
/* A function. */
cdk_function,
/* An array. */
cdk_array,
/* A pointer. */
cdk_pointer,
/* Parenthesized declarator with nested attributes. */
cdk_attrs
};
struct c_arg_tag {
/* The argument name. */
tree id;
/* The type of the argument. */
tree type;
};
/* Information about the parameters in a function declarator. */
struct c_arg_info {
/* A list of parameter decls. */
tree parms;
/* A list of structure, union and enum tags defined. */
vec<c_arg_tag, va_gc> *tags;
/* A list of argument types to go in the FUNCTION_TYPE. */
tree types;
/* A list of non-parameter decls (notably enumeration constants)
defined with the parameters. */
tree others;
/* A compound expression of VLA sizes from the parameters, or NULL.
In a function definition, these are used to ensure that
side-effects in sizes of arrays converted to pointers (such as a
parameter int i[n++]) take place; otherwise, they are
ignored. */
tree pending_sizes;
/* True when these arguments had [*]. */
BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator. */
struct c_declarator {
/* The kind of declarator. */
enum c_declarator_kind kind;
location_t id_loc; /* Currently only set for cdk_id, cdk_array. */
/* Except for cdk_id, the contained declarator. For cdk_id, NULL. */
struct c_declarator *declarator;
union {
/* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
declarator. */
tree id;
/* For functions. */
struct c_arg_info *arg_info;
/* For arrays. */
struct {
/* The array dimension, or NULL for [] and [*]. */
tree dimen;
/* The qualifiers inside []. */
int quals;
/* The attributes (currently ignored) inside []. */
tree attrs;
/* Whether [static] was used. */
BOOL_BITFIELD static_p : 1;
/* Whether [*] was used. */
BOOL_BITFIELD vla_unspec_p : 1;
} array;
/* For pointers, the qualifiers on the pointer type. */
int pointer_quals;
/* For attributes. */
tree attrs;
} u;
};
/* A type name. */
struct c_type_name {
/* The declaration specifiers. */
struct c_declspecs *specs;
/* The declarator. */
struct c_declarator *declarator;
};
/* A parameter. */
struct c_parm {
/* The declaration specifiers, minus any prefix attributes. */
struct c_declspecs *specs;
/* The attributes. */
tree attrs;
/* The declarator. */
struct c_declarator *declarator;
/* The location of the parameter. */
location_t loc;
};
/* Used when parsing an enum. Initialized by start_enum.
Tracks the running state needed to assign values to enumerators. */
struct c_enum_contents
{
/* While defining an enum type, this is 1 plus the last enumerator
constant value. */
tree enum_next_value;
/* Nonzero means that there was overflow computing enum_next_value. */
int enum_overflow;
};
/* A type of reference to a static identifier in an inline
function. */
enum c_inline_static_type {
/* Identifier with internal linkage used in function that may be an
inline definition (i.e., file-scope static). */
csi_internal,
/* Modifiable object with static storage duration defined in
function that may be an inline definition (i.e., local
static). */
csi_modifiable
};
/* in c-parser.c */
extern void c_parse_init (void);
extern bool c_keyword_starts_typename (enum rid keyword);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
struct c_spot_bindings;
struct c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern bool global_bindings_p (void);
extern tree pushdecl (tree);
extern void push_scope (void);
extern tree pop_scope (void);
extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
extern void record_inline_static (location_t, tree, tree,
enum c_inline_static_type);
extern void c_init_decl_processing (void);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (location_t, tree,
struct c_declspecs *,
bool, bool);
extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
tree, tree);
extern tree check_for_loop_decls (location_t, bool);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (location_t, tree);
extern tree lookup_label_for_goto (location_t, tree);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern struct c_spot_bindings *c_get_switch_bindings (void);
extern void c_release_switch_bindings (struct c_spot_bindings *);
extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
location_t, location_t);
extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (location_t, tree, tree, tree,
struct c_struct_parse_info *);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
struct c_declspecs *, tree, tree *);
extern tree groktypename (struct c_type_name *, tree *, bool *);
extern tree grokparm (const struct c_parm *, tree *);
extern tree implicitly_declare (location_t, tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (void);
extern void c_pop_function_context (void);
extern void push_parm_decl (const struct c_parm *, tree *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *);
extern tree c_builtin_function (tree);
extern tree c_builtin_function_ext_scope (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (location_t, struct c_enum_contents *, tree);
extern bool start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
struct c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern void temp_store_parm_decls (tree, tree);
extern void temp_pop_parm_decls (void);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *, location_t);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (location_t,
struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_addrspace (source_location,
struct c_declspecs *,
addr_space_t);
extern struct c_declspecs *declspecs_add_alignas (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern tree c_last_sizeof_arg;
extern location_t c_last_sizeof_loc;
extern struct c_switch *c_switch_stack;
extern tree c_objc_common_truthvalue_conversion (location_t, tree);
extern tree require_complete_type (location_t, tree);
extern bool same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern int comptypes_check_different_types (tree, tree, bool *);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree, bool = false);
extern void c_incomplete_type_error (location_t, const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (location_t,
struct c_expr);
extern struct c_expr default_function_array_read_conversion (location_t,
struct c_expr);
extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr,
bool, bool);
extern tree decl_constant_value_1 (tree, bool);
extern void mark_exp_read (tree);
extern tree composite_type (tree, tree);
extern tree build_component_ref (location_t, tree, tree, location_t);
extern tree build_array_ref (location_t, tree, tree);
extern tree build_external_ref (location_t, tree, bool, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
location_t, tree, tree, location_t);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);
extern void store_init_value (location_t, tree, tree, tree);
extern void maybe_warn_string_init (location_t, tree, struct c_expr);
extern void start_init (tree, tree, int, rich_location *);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void finish_implicit_inits (location_t, struct obstack *);
extern void push_init_level (location_t, int, struct obstack *);
extern struct c_expr pop_init_level (location_t, int, struct obstack *,
location_t);
extern void set_init_index (location_t, tree, tree, struct obstack *);
extern void set_init_label (location_t, tree, location_t, struct obstack *);
extern void process_init_element (location_t, struct c_expr, bool,
struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool,
unsigned int);
extern void check_compound_literal_type (location_t, struct c_type_name *);
extern tree c_start_case (location_t, location_t, tree, bool);
extern void c_finish_case (tree, tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool,
bool);
extern tree build_asm_stmt (bool, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree);
extern void c_finish_loop (location_t, location_t, tree, location_t, tree,
tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree *, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);
extern tree c_finish_omp_construct (location_t, enum tree_code, tree, tree);
extern tree c_finish_oacc_data (location_t, tree, tree);
extern tree c_finish_oacc_host_data (location_t, tree, tree);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern void c_finish_omp_cancel (location_t, tree);
extern void c_finish_omp_cancellation_point (location_t, tree);
extern tree c_finish_omp_clauses (tree, enum c_omp_region_type);
extern tree c_build_va_arg (location_t, tree, location_t, tree);
extern tree c_finish_transaction (location_t, tree, int);
extern bool c_tree_equal (tree, tree);
extern tree c_build_function_call_vec (location_t, vec<location_t>, tree,
vec<tree, va_gc> *, vec<tree, va_gc> *);
extern tree c_omp_clause_copy_ctor (tree, tree, tree);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* In c-decl.c */
/* Tell the binding oracle what kind of binding we are looking for. */
enum c_oracle_request
{
C_ORACLE_SYMBOL,
C_ORACLE_TAG,
C_ORACLE_LABEL
};
/* If this is non-NULL, then it is a "binding oracle" which can lazily
create bindings when needed by the C compiler. The oracle is told
the name and type of the binding to create. It can call pushdecl
or the like to ensure the binding is visible; or do nothing,
leaving the binding untouched. c-decl.c takes note of when the
oracle has been called and will not call it again if it fails to
create a given binding. */
typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier);
extern c_binding_oracle_function *c_binding_oracle;
extern void c_finish_incomplete_decl (tree);
extern tree c_omp_reduction_id (enum tree_code, tree);
extern tree c_omp_reduction_decl (tree);
extern tree c_omp_reduction_lookup (tree, tree);
extern tree c_check_omp_declare_reduction_r (tree *, int *, void *);
extern void c_pushtag (location_t, tree, tree);
extern void c_bind (location_t, tree, bool);
extern bool tag_exists_p (enum tree_code, tree);
/* In c-errors.c */
extern bool pedwarn_c90 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern bool pedwarn_c99 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern void
set_c_expr_source_range (c_expr *expr,
location_t start, location_t finish);
extern void
set_c_expr_source_range (c_expr *expr,
source_range src_range);
/* In c-fold.c */
extern vec<tree> incomplete_record_decls;
#if CHECKING_P
namespace selftest {
extern void run_c_tests (void);
} // namespace selftest
#endif /* #if CHECKING_P */
#endif /* ! GCC_C_TREE_H */
|
csc.h | #ifndef __csc_H
#define __csc_H
// Serial CSC sparse matrix-vector product with contiguous (unit-stride)
// x and y:  y (+)= a * A * x.  When overwrite_y is set, y is cleared
// first so the result is exactly y = a * A * x.
template<typename I, typename T1,typename T2>
void csc_matvec_noomp_contig(const bool overwrite_y,
                             const I n_row,
                             const I n_col,
                             const I Ap[],
                             const I Ai[],
                             const T1 Ax[],
                             const T1 a,
                             const T2 x[],
                             T2 y[])
{
    if(overwrite_y){
        for(I r = 0; r < n_row; r++){
            y[r] = 0;
        }
    }
    // Column-at-a-time accumulation: every nonzero of column `col`
    // scatters (a * value) * x[col] into its row of y.
    for(I col = 0; col < n_col; col++){
        const T2 xc = x[col];
        for(I k = Ap[col]; k < Ap[col+1]; k++){
            y[Ai[k]] += (a * Ax[k]) * xc;
        }
    }
}
// Serial CSC sparse matrix-vector product y (+)= a * A * x where x and y
// are addressed with element strides x_stride / y_stride.  When
// overwrite_y is set, y is cleared first so the result is y = a * A * x.
//
// BUG FIX: the overwrite loop previously zeroed y[j] instead of
// y[j * y_stride]; for y_stride != 1 it cleared the wrong elements and
// left the real output entries with stale data.  The OpenMP twin
// (csc_matvec_omp_strided) already used the strided index.
template<typename I, typename T1,typename T2>
void csc_matvec_noomp_strided(const bool overwrite_y,
                              const I n_row,
                              const I n_col,
                              const I Ap[],
                              const I Ai[],
                              const T1 Ax[],
                              const T1 a,
                              const npy_intp x_stride,
                              const T2 x[],
                              const npy_intp y_stride,
                              T2 y[])
{
    if(overwrite_y){
        for(I j = 0; j < n_row; j++){
            y[j * y_stride] = 0;
        }
    }
    for(I j = 0; j < n_col; j++){
        I col_start = Ap[j];
        I col_end = Ap[j+1];
        for(I ii = col_start; ii < col_end; ii++){
            const I i = Ai[ii];
            y[i * y_stride] += (a * Ax[ii]) * x[j * x_stride];
        }
    }
}
template<typename I, typename T1,typename T2>
void csc_matvecs_noomp_strided(const bool overwrite_y,
const I n_row,
const I n_col,
const npy_intp n_vecs,
const I Ap[],
const I Ai[],
const T1 Ax[],
const T1 a,
const npy_intp x_stride_row,
const npy_intp x_stride_col,
const T2 x[],
const npy_intp y_stride_row,
const npy_intp y_stride_col,
T2 y[])
{
if(overwrite_y){
const npy_intp n = n_vecs * n_row;
for(npy_intp i = 0; i < n; i++){
y[i] = T2(0);
}
}
if(y_stride_col < y_stride_row){
for(I j = 0; j < n_col; j++){
I col_start = Ap[j];
I col_end = Ap[j+1];
for(I ii = col_start; ii < col_end; ii++){
T2 * y_row = y + y_stride_row * Ai[ii];
const T2 ax = (a * Ax[ii]);
axpy_strided(n_vecs, ax, x_stride_col, x, y_stride_col, y_row);
}
x += x_stride_row;
}
}
else{
for(I m=0;m<n_vecs;m++){
const T2 * x_row = x;
for(I j = 0; j < n_col; j++){
I col_start = Ap[j];
I col_end = Ap[j+1];
for(I ii = col_start; ii < col_end; ii++){
y[y_stride_row * Ai[ii]] += (a * Ax[ii]) * (*x_row);
}
x_row += x_stride_row;
}
x += x_stride_col;
y += y_stride_col;
}
}
}
#if defined(_OPENMP)
#include "openmp.h"
// OpenMP CSC sparse matrix-vector product with contiguous x and y:
// y (+)= a * A * x.  Columns are distributed across threads; because
// different columns may scatter into the same output row, accumulation
// goes through atomic_add (from "openmp.h").
template<typename I, typename T1,typename T2>
void csc_matvec_omp_contig(const bool overwrite_y,
const I n_row,
const I n_col,
const I Ap[],
const I Ai[],
const T1 Ax[],
const T1 a,
const T2 x[],
T2 y[])
{
#pragma omp parallel
{
const int nthread = omp_get_num_threads();
// NOTE(review): chunk is derived from n_row but schedules the column loop
// (over n_col) below -- looks like a load-balancing heuristic; confirm.
const I chunk = std::max((I)1,n_row/(100*nthread));
if(overwrite_y){
#pragma omp for schedule(static)
for(I j = 0; j < n_row; j++){
y[j] = 0;
}
}
#pragma omp for schedule(dynamic,chunk)
for(I j = 0; j < n_col; j++){
I col_start = Ap[j];
I col_end = Ap[j+1];
for(I ii = col_start; ii < col_end; ii++){
const I i = Ai[ii];
const T2 aa = (a * Ax[ii]) * x[j];
// Concurrent writers may target the same row i.
atomic_add(y[i],aa);
}
}
}
}
// OpenMP CSC sparse matrix-vector product with element strides on x and y:
// y (+)= a * A * x.  Same parallel strategy as csc_matvec_omp_contig:
// columns are distributed, rows are accumulated via atomic_add.
template<typename I, typename T1,typename T2>
void csc_matvec_omp_strided(const bool overwrite_y,
const I n_row,
const I n_col,
const I Ap[],
const I Ai[],
const T1 Ax[],
const T1 a,
const npy_intp x_stride,
const T2 x[],
const npy_intp y_stride,
T2 y[])
{
#pragma omp parallel
{
const int nthread = omp_get_num_threads();
// NOTE(review): chunk sized from n_row for a loop over n_col -- verify.
const I chunk = std::max((I)1,n_row/(100*nthread));
if(overwrite_y){
#pragma omp for schedule(static)
for(I j = 0; j < n_row; j++){
y[j * y_stride] = 0;
}
}
#pragma omp for schedule(dynamic,chunk)
for(I j = 0; j < n_col; j++){
I col_start = Ap[j];
I col_end = Ap[j+1];
for(I ii = col_start; ii < col_end; ii++){
const I i = Ai[ii];
const T2 aa = (a * Ax[ii]) * x[j * x_stride];
atomic_add(y[i * y_stride],aa);
}
}
}
}
// Multi-vector variant: no parallel implementation exists, so even under
// _OPENMP this simply delegates to the serial strided kernel.
template<typename I, typename T1,typename T2>
inline void csc_matvecs_omp_strided(const bool overwrite_y,
const I n_row,
const I n_col,
const npy_intp n_vecs,
const I Ap[],
const I Ai[],
const T1 Ax[],
const T1 a,
const npy_intp x_stride_row,
const npy_intp x_stride_col,
const T2 x[],
const npy_intp y_stride_row,
const npy_intp y_stride_col,
T2 y[])
{
csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Ai,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y);
}
#else
// Non-OpenMP build: the _omp_ name forwards to the serial kernel so
// callers can use one spelling regardless of build configuration.
template<typename I, typename T1,typename T2>
void csc_matvec_omp_contig(const bool overwrite_y,
const I n_row,
const I n_col,
const I Ap[],
const I Ai[],
const T1 Ax[],
const T1 a,
const T2 x[],
T2 y[])
{
csc_matvec_noomp_contig(overwrite_y,n_row,n_col,Ap,Ai,Ax,a,x,y);
}
// Non-OpenMP build: strided _omp_ entry point forwards to the serial kernel.
template<typename I, typename T1,typename T2>
inline void csc_matvec_omp_strided(const bool overwrite_y,
const I n_row,
const I n_col,
const I Ap[],
const I Ai[],
const T1 Ax[],
const T1 a,
const npy_intp x_stride,
const T2 x[],
const npy_intp y_stride,
T2 y[])
{
csc_matvec_noomp_strided(overwrite_y,n_row,n_col,Ap,Ai,Ax,a,x_stride,x,y_stride,y);
}
// Non-OpenMP build: multi-vector _omp_ entry point forwards to the serial kernel.
template<typename I, typename T1,typename T2>
inline void csc_matvecs_omp_strided(const bool overwrite_y,
const I n_row,
const I n_col,
const npy_intp n_vecs,
const I Ap[],
const I Ai[],
const T1 Ax[],
const T1 a,
const npy_intp x_stride_row,
const npy_intp x_stride_col,
const T2 x[],
const npy_intp y_stride_row,
const npy_intp y_stride_col,
T2 y[])
{
csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Ai,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y);
}
#endif
// when openmp is not being used omp and noomp versions are identical
// Dispatcher: converts byte strides to element strides and picks the
// contiguous fast path when both strides are 1, otherwise the strided
// kernel (normalizing whichever stride is 1).
// NOTE(review): assumes x_stride_byte / y_stride_byte are exact multiples
// of sizeof(T2) -- confirm callers guarantee this.
template<typename I, typename T1,typename T2>
void csc_matvec_noomp(const bool overwrite_y,
const I n_row,
const I n_col,
const I Ap[],
const I Aj[],
const T1 Ax[],
const T1 a,
const npy_intp x_stride_byte,
const T2 x[],
const npy_intp y_stride_byte,
T2 y[])
{
const npy_intp y_stride = y_stride_byte/sizeof(T2);
const npy_intp x_stride = x_stride_byte/sizeof(T2);
if(y_stride == 1){
if(x_stride == 1){
csc_matvec_noomp_contig(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,x,y);
}
else{
csc_matvec_noomp_strided(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,x_stride,x,1,y);
}
}
else{
if(x_stride == 1){
csc_matvec_noomp_strided(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,1,x,y_stride,y);
}
else{
csc_matvec_noomp_strided(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,x_stride,x,y_stride,y);
}
}
}
// OpenMP dispatcher: identical branch logic to csc_matvec_noomp, but
// routes to the _omp_ kernels (which are serial forwarders when the
// build has no OpenMP support).
template<typename I, typename T1,typename T2>
void csc_matvec_omp(const bool overwrite_y,
const I n_row,
const I n_col,
const I Ap[],
const I Aj[],
const T1 Ax[],
const T1 a,
const npy_intp x_stride_byte,
const T2 x[],
const npy_intp y_stride_byte,
T2 y[])
{
const npy_intp y_stride = y_stride_byte/sizeof(T2);
const npy_intp x_stride = x_stride_byte/sizeof(T2);
if(y_stride == 1){
if(x_stride == 1){
csc_matvec_omp_contig(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,x,y);
}
else{
csc_matvec_omp_strided(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,x_stride,x,1,y);
}
}
else{
if(x_stride == 1){
csc_matvec_omp_strided(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,1,x,y_stride,y);
}
else{
csc_matvec_omp_strided(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,x_stride,x,y_stride,y);
}
}
}
// Dispatcher for the multi-vector CSC product Y (+)= a * A * X (serial).
// Byte strides are converted to element strides (assumed to be multiples
// of sizeof(T2)); whichever axis is contiguous is normalized to stride 1
// so csc_matvecs_noomp_strided can pick its fast path.
//
// BUG FIX: in the y_stride_row==1 branch the first two dispatches passed
// (y_stride_row, 1) as the y strides, i.e. they dropped y_stride_col and
// made every output vector overwrite the same memory.  They now pass
// (1, y_stride_col), matching the csc_matvecs_omp dispatcher.
template<typename I, typename T1,typename T2>
inline void csc_matvecs_noomp(const bool overwrite_y,
                              const I n_row,
                              const I n_col,
                              const npy_intp n_vecs,
                              const I Ap[],
                              const I Aj[],
                              const T1 Ax[],
                              const T1 a,
                              const npy_intp x_stride_row_byte,
                              const npy_intp x_stride_col_byte,
                              const T2 x[],
                              const npy_intp y_stride_row_byte,
                              const npy_intp y_stride_col_byte,
                              T2 y[])
{
    const npy_intp y_stride_row = y_stride_row_byte/sizeof(T2);
    const npy_intp y_stride_col = y_stride_col_byte/sizeof(T2);
    const npy_intp x_stride_row = x_stride_row_byte/sizeof(T2);
    const npy_intp x_stride_col = x_stride_col_byte/sizeof(T2);
    if(y_stride_col==1){
        if(x_stride_col==1){
            csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,1,x,y_stride_row,1,y);
        }
        else if(x_stride_row==1){
            csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,1,x_stride_col,x,y_stride_row,1,y);
        }
        else{
            csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,1,y);
        }
    }
    else if(y_stride_row==1){
        if(x_stride_col==1){
            csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,1,x,1,y_stride_col,y);
        }
        else if(x_stride_row==1){
            csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,1,x_stride_col,x,1,y_stride_col,y);
        }
        else{
            csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,1,y_stride_col,y);
        }
    }
    else{
        csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y);
    }
}
// Dispatcher for the multi-vector CSC product Y (+)= a * A * X, routing to
// the _omp_ kernels.  Byte strides are converted to element strides
// (assumed multiples of sizeof(T2)); contiguous axes are normalized to
// stride 1 so the strided kernel can pick its fast path.
template<typename I, typename T1,typename T2>
inline void csc_matvecs_omp(const bool overwrite_y,
const I n_row,
const I n_col,
const npy_intp n_vecs,
const I Ap[],
const I Aj[],
const T1 Ax[],
const T1 a,
const npy_intp x_stride_row_byte,
const npy_intp x_stride_col_byte,
const T2 x[],
const npy_intp y_stride_row_byte,
const npy_intp y_stride_col_byte,
T2 y[])
{
const npy_intp y_stride_row = y_stride_row_byte/sizeof(T2);
const npy_intp y_stride_col = y_stride_col_byte/sizeof(T2);
const npy_intp x_stride_row = x_stride_row_byte/sizeof(T2);
const npy_intp x_stride_col = x_stride_col_byte/sizeof(T2);
if(y_stride_col==1){
if(x_stride_col==1){
csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,1,x,y_stride_row,1,y);
}
else if(x_stride_row==1){
csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,1,x_stride_col,x,y_stride_row,1,y);
}
else{
csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,1,y);
}
}
else if(y_stride_row==1){
if(x_stride_col==1){
csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,1,x,1,y_stride_col,y);
}
else if(x_stride_row==1){
csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,1,x_stride_col,x,1,y_stride_col,y);
}
else{
csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,1,y_stride_col,y);
}
}
else{
csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y);
}
}
#endif |
batch_norm.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_BATCH_NORM_H_
#define MACE_KERNELS_BATCH_NORM_H_
#if defined(MACE_ENABLE_NEON) && defined(__aarch64__)
#include <arm_neon.h>
#endif
#include <memory>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/kernels/activation.h"
#include "mace/public/mace.h"
#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif // MACE_ENABLE_OPENCL
namespace mace {
namespace kernels {
// Shared state for all BatchNormFunctor specializations.
// folded_constant_: scale/offset already have mean/var folded in.
// activation_/relux_max_limit_: fused post-norm activation parameters.
struct BatchNormFunctorBase {
BatchNormFunctorBase(bool folded_constant,
const ActivationType activation,
const float relux_max_limit)
: folded_constant_(folded_constant),
activation_(activation),
relux_max_limit_(relux_max_limit) {}
const bool folded_constant_;
const ActivationType activation_;
const float relux_max_limit_;
};
template<DeviceType D, typename T>
struct BatchNormFunctor;
template<>
struct BatchNormFunctor<DeviceType::CPU, float> : BatchNormFunctorBase {
  BatchNormFunctor(const bool folded_constant,
                   const ActivationType activation,
                   const float relux_max_limit)
      : BatchNormFunctorBase(folded_constant, activation, relux_max_limit) {}

  // Inference-time batch normalization (https://arxiv.org/abs/1502.03167):
  //   new_scale  = scale / sqrt(var + epsilon)
  //   new_offset = offset - mean * new_scale
  //   Y = new_scale * X + new_offset
  // When folded_constant_ is set, scale/offset are used as-is and
  // mean/var are never touched.  Input is indexed as dims 0..3 =
  // (batch, channels, height, width).
  //
  // FIX: the per-plane base index was previously a local named `offset`,
  // shadowing the `offset` tensor parameter; renamed to `base`.  The inner
  // loop bound now reuses channel_size instead of recomputing height*width.
  MaceStatus operator()(const Tensor *input,
                        const Tensor *scale,
                        const Tensor *offset,
                        const Tensor *mean,
                        const Tensor *var,
                        const float epsilon,
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    const index_t batch = input->dim(0);
    const index_t channels = input->dim(1);
    const index_t height = input->dim(2);
    const index_t width = input->dim(3);

    Tensor::MappingGuard input_mapper(input);
    Tensor::MappingGuard scale_mapper(scale);
    Tensor::MappingGuard offset_mapper(offset);
    Tensor::MappingGuard output_mapper(output);
    const float *input_ptr = input->data<float>();
    const float *scale_ptr = scale->data<float>();
    const float *offset_ptr = offset->data<float>();
    float *output_ptr = output->mutable_data<float>();

    std::vector<float> new_scale;
    std::vector<float> new_offset;
    if (!folded_constant_) {
      // Fold mean/var into per-channel scale and offset once up front.
      new_scale.resize(channels);
      new_offset.resize(channels);
      Tensor::MappingGuard mean_mapper(mean);
      Tensor::MappingGuard var_mapper(var);
      const float *mean_ptr = mean->data<float>();
      const float *var_ptr = var->data<float>();
#pragma omp parallel for
      for (index_t c = 0; c < channels; ++c) {
        new_scale[c] = scale_ptr[c] / std::sqrt(var_ptr[c] + epsilon);
        new_offset[c] = offset_ptr[c] - mean_ptr[c] * new_scale[c];
      }
    }
    const float *scale_data = folded_constant_ ? scale_ptr : new_scale.data();
    const float
        *offset_data = folded_constant_ ? offset_ptr : new_offset.data();

    const index_t channel_size = height * width;
    const index_t batch_size = channels * channel_size;

    // NEON is slower, so stick to the trivial implementation.
#pragma omp parallel for collapse(2)
    for (index_t b = 0; b < batch; ++b) {
      for (index_t c = 0; c < channels; ++c) {
        const index_t base = b * batch_size + c * channel_size;
        for (index_t hw = 0; hw < channel_size; ++hw) {
          output_ptr[base + hw] =
              scale_data[c] * input_ptr[base + hw] + offset_data[c];
        }
      }
    }

    DoActivation(output_ptr, output_ptr, output->size(), activation_,
                 relux_max_limit_);
    return MACE_SUCCESS;
  }
};
#ifdef MACE_ENABLE_OPENCL
// GPU (OpenCL) batch norm functor.  operator() is implemented elsewhere
// (in the corresponding .cc); this declaration caches the compiled kernel
// and the shape it was tuned for so repeated calls can skip rebuilds.
template<typename T>
struct BatchNormFunctor<DeviceType::GPU, T> : BatchNormFunctorBase {
BatchNormFunctor(const bool folded_constant,
const ActivationType activation,
const float relux_max_limit)
: BatchNormFunctorBase(folded_constant, activation, relux_max_limit) {}
MaceStatus operator()(const Tensor *input,
const Tensor *scale,
const Tensor *offset,
const Tensor *mean,
const Tensor *var,
const float epsilon,
Tensor *output,
StatsFuture *future);
// Cached OpenCL kernel state (populated lazily by operator()).
cl::Kernel kernel_;
uint32_t kwg_size_;
std::unique_ptr<BufferBase> kernel_error_;
std::vector<index_t> input_shape_;
};
#endif // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_BATCH_NORM_H_
|
conv_dw_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include "conv_dw_kernel_arm.h"
#include "conv_dw_k5_k7_kernel_arm.h"
#include "conv_dw_dilation_kernel_arm.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
/* Copy an m x n float matrix into an m_align x n_align destination,
 * placing it at row offset pad_h and column offset pad_w.  The caller is
 * expected to have zero-filled dst; cells outside the copied region are
 * left untouched.  If the destination is not actually larger than the
 * source in both dimensions, the data is copied verbatim. */
static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        /* nothing to pad: straight bulk copy */
        memcpy(dst, src, m * n * sizeof(float));
        return;
    }
    for (int row = 0; row < m; ++row)
    {
        float* d = dst + (row + pad_h) * n_align + pad_w;
        memcpy(d, src + row * n, n * sizeof(float));
    }
}
/* Pad a c x m x n tensor into a c x m_align x n_align buffer, one channel
 * plane at a time (each plane shifted by pad_h rows / pad_w columns).
 * dst is assumed pre-zeroed by the caller, as in pad_0_align_2D. */
static void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        /* no alignment padding required for any plane */
        memcpy(dst, src, c * m * n * sizeof(float));
        return;
    }
    for (int ch = 0; ch < c; ++ch)
    {
        pad_0_align_2D(dst + ch * m_align * n_align, src + ch * m * n,
                       m, n, m_align, n_align, pad_h, pad_w);
    }
}
/* Inverse of pad_0_align_2D: extract the m x n interior (starting at row
 * pad_h, column pad_w) of an m_align x n_align padded matrix into a dense
 * m x n destination.  If the source is not actually larger, copy verbatim. */
static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, m * n * sizeof(float));
        return;
    }
    for (int row = 0; row < m; ++row)
    {
        const float* s = src + (row + pad_h) * n_align + pad_w;
        memcpy(dst + row * n, s, n * sizeof(float));
    }
}
/* Inverse of pad_0_align_3D: strip the alignment padding from each of the
 * c channel planes (m_align x n_align -> m x n, skipping pad_h/pad_w).
 * (The previous comment here said "pad" -- it was copy-pasted; this
 * function removes padding.) */
static void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, c * m * n * sizeof(float));
        return;
    }
    for (int ch = 0; ch < c; ++ch)
    {
        delete_0_2D(dst + ch * m * n, src + ch * m_align * n_align,
                    m_align, n_align, m, n, pad_h, pad_w);
    }
}
#ifdef __aarch64__
void dw_k3s2p0(float* data, int h, int w, float* kernel, float* output, float* bias, int out_w, int act);
void dw_k3s2p0p1(float* data, int h, int w, float* kernel, float* output, float* bias, int out_w, int act);
void dw_k3s1p1_a72(float* data, int h, int w, float* kernel, float* output, float* bias, int act);
void dw_k3s2p1_a72(float* data, int h, int w, float* kernel, float* output, float* bias, int act);
/* Depthwise 3x3 convolution over all channels via the hand-written AArch64
 * (Cortex-A72 tuned) assembly kernels declared above; one kernel call
 * processes a whole channel plane, channels are split across num_thread
 * OpenMP threads.  Each channel's 3x3 weights live at weight_buf + i*9.
 * Kernel selection: stride 1 -> dw_k3s1p1_a72; stride != 1 with pad_h0==0
 * -> dw_k3s2p0 / dw_k3s2p0p1 (by pad_h1); otherwise dw_k3s2p1_a72.
 * NOTE(review): the non-unit-stride branches assume stride 2 (kernel names
 * say s2) -- confirm callers filter other strides.  cpu_affinity is unused. */
static void DirectConv(float* input_buf, int input_h, int input_w, float* output_buf, int output_h, int output_w,
float* weight_buf, int channel_num, int stride, float* bias, int* pads, int activation,
int num_thread, int cpu_affinity)
{
int channel_size = input_h * input_w;
int channel_size_out = output_h * output_w;
int pad_h0 = pads[0];
int pad_h1 = pads[2];
if (stride == 1)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < channel_num; i++)
{
float* cur_input = input_buf + i * channel_size;
float* cur_output = output_buf + i * channel_size_out;
/* per-channel bias, or NULL when the conv has no bias */
float* bias_tmp = NULL;
if (bias)
bias_tmp = bias + i;
dw_k3s1p1_a72(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, activation);
}
}
else if (pad_h0 == 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < channel_num; i++)
{
float* cur_input = input_buf + i * channel_size;
float* cur_output = output_buf + i * channel_size_out;
float* bias_tmp = NULL;
if (bias)
bias_tmp = bias + i;
/* pad_h1 decides whether a trailing row of padding is needed */
if (pad_h1 == 0)
dw_k3s2p0(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, output_w, activation);
else
dw_k3s2p0p1(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, output_w,
activation);
}
}
else
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < channel_num; i++)
{
float* cur_input = input_buf + i * channel_size;
float* cur_output = output_buf + i * channel_size_out;
float* bias_tmp = NULL;
if (bias)
bias_tmp = bias + i;
dw_k3s2p1_a72(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, activation);
}
}
}
#else
void dw_k3s2(float* input, float* kernel, float* output, int channel, int width, int height, float* bias, int pad0);
void dw_k3s2_relu_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias,
int pad0);
void dw_k3s2_relu6_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias,
int pad0);
void dw_k3s1p1(float* input, float* kernel, float* output, int channel, int width, int height, float* bias);
void dw_k3s1p1_relu_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias);
void dw_k3s1p1_relu6_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias);
/* 32-bit ARM variant of DirectConv: depthwise 3x3 convolution using the
 * assembly kernels declared above, with the activation fused into the
 * kernel choice (activation == 0 -> ReLU-fused, > 0 -> ReLU6-fused,
 * < 0 -> no activation).  Each channel's 3x3 weights are at
 * weight_buf + c*9; channels are split across num_thread OpenMP threads.
 * NOTE(review): strides other than 1 or 2 fall through and silently do
 * nothing -- confirm callers guarantee stride in {1,2}.
 * cpu_affinity is accepted but unused. */
static void DirectConv(float* input_buf, int input_h, int input_w, float* output_buf, int output_h, int output_w,
float* weight_buf, int channel_num, int stride, float* bias, int* pads, int activation,
int num_thread, int cpu_affinity)
{
int pad_h0 = pads[0];
if (stride == 1)
{
#pragma omp parallel for num_threads(num_thread)
for (int c = 0; c < channel_num; c++)
{
float* cur_input = input_buf + c * input_h * input_w;
float* cur_output = output_buf + c * output_h * output_w;
float* cur_weight = weight_buf + c * 9;
/* per-channel bias pointer, or NULL when bias is absent */
float* cur_bias = bias ? bias + c : bias;
if (activation >= 0)
{
if (activation == 0)
dw_k3s1p1_relu_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias);
else
dw_k3s1p1_relu6_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias);
}
else
{
dw_k3s1p1(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias);
}
}
}
else if (stride == 2)
{
#pragma omp parallel for num_threads(num_thread)
for (int c = 0; c < channel_num; c++)
{
float* cur_input = input_buf + c * input_h * input_w;
float* cur_output = output_buf + c * output_h * output_w;
float* cur_weight = weight_buf + c * 9;
float* cur_bias = bias ? bias + c : bias;
if (activation >= 0)
{
if (activation == 0)
dw_k3s2_relu_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias, pad_h0);
else
dw_k3s2_relu6_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias, pad_h0);
}
else
{
dw_k3s2(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias, pad_h0);
}
}
}
}
#endif
// Allocate and zero the padded-input scratch buffer used by the k5/s1 path
// of conv_dw_run. Returns 0 on success, -1 on allocation failure.
// FIX: the original passed an unchecked sys_malloc() result straight to
// memset(), which is undefined behavior on OOM.
int conv_dw_prerun(struct tensor* input_tensor, struct tensor* filter_tensor,
                   struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param)
{
    int batch = input_tensor->dims[0];
    int input_c = input_tensor->dims[1];
    int input_h = input_tensor->dims[2];
    int input_w = input_tensor->dims[3];

    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int pad_h1 = param->pad_h1;
    int pad_w1 = param->pad_w1;

    int padded_in_h = input_h + pad_h0 + pad_h1;
    int padded_in_w = input_w + pad_w0 + pad_w1;

    // One padded plane per channel per batch image.
    size_t pad_size = (size_t)batch * input_c * padded_in_h * padded_in_w * sizeof(float);
    priv_info->input_pad = sys_malloc(pad_size);
    if (priv_info->input_pad == NULL)
        return -1;
    memset(priv_info->input_pad, 0, pad_size);
    return 0;
}
// Execute a depthwise convolution, dispatching per batch image to the
// hand-optimized kernel that matches (kernel size, stride, dilation).
// Supported: dilated path (see condition below), 3x3 s1/s2, 5x5 s1/s2,
// 7x7 s1/s2. Returns 0 on success, -1 if stride_h != stride_w.
// NOTE(review): a shape that matches none of the branches (e.g. 4x4 kernel)
// silently leaves the output untouched and still returns 0 — confirm callers
// guarantee only supported shapes reach this function.
int conv_dw_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor,
                struct tensor* output_tensor, struct conv_priv_info* conv_info, struct conv_param* param, int num_thread, int cpu_affinity)
{
    /* param */
    int pads[4];
    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    pads[0] = param->pad_h0;
    pads[1] = param->pad_w0;
    pads[2] = param->pad_h1;
    pads[3] = param->pad_w1;

    // Only square strides are supported by the kernels below.
    if (stride_h != stride_w)
        return -1;

    int act_type = param->activation;

    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1] / group;  // channels per group (1 for pure depthwise)
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;

    int out_c = output_tensor->dims[1] / group;
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int output_size = out_c * out_h * out_w;

    // Padded plane dimensions; must match conv_dw_prerun's allocation.
    int padded_in_h = in_h + param->pad_h0 + param->pad_h1;
    int padded_in_w = in_w + param->pad_w0 + param->pad_w1;

    /* buffer addr */
    float* input_buf = ( float* )input_tensor->data;
    float* kernel_buf = ( float* )filter_tensor->data;
    float* output_buf = ( float* )output_tensor->data;
    float* biases_buf = NULL;
    if (bias_tensor)
        biases_buf = ( float* )bias_tensor->data;

    for (int n = 0; n < batch; n++) // batch size
    {
        float* cur_input = input_buf + n * input_size * group;
        float* cur_output = output_buf + n * output_size * group;
        // NOTE(review): dilated path requires BOTH dilations != 1 (&&) and
        // dilation_h == pad_h0 — confirm "||" was not intended for the first
        // part, and that the kernel assumes pad == dilation.
        if (dilation_h != 1 && dilation_w != 1 && dilation_h == pads[0])
        {
            conv_dw_dilation_run(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, pads[0], act_type,
                                 num_thread);
        }
        else if (kernel_h == 3 && kernel_w == 3)
        {
            DirectConv(cur_input, in_h, in_w, cur_output, out_h, out_w, kernel_buf, group, stride_h, biases_buf, pads,
                       act_type, num_thread, cpu_affinity);
        }
        else if (kernel_h == 5 && kernel_w == 5)
        {
            if (stride_h == 1)
            {
                // Copy this image's channels into the zero-padded scratch
                // buffer allocated in conv_dw_prerun, then run the k5s1 kernel
                // on the padded planes.
                pad_0_align_3D((float*)conv_info->input_pad + n * group * padded_in_h * padded_in_w, cur_input,
                               in_h, in_w, padded_in_h, padded_in_w, group, param->pad_h0, param->pad_w0);
                depthwise_conv_k5s1((float*)conv_info->input_pad, kernel_buf, biases_buf, cur_output, padded_in_h, padded_in_w, group, out_h, out_w,
                                    act_type, num_thread);
            }
            else if (stride_h == 2)
                depthwise_conv_k5s2(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
                                    act_type, num_thread);
        }
        else if (kernel_h == 7 && kernel_w == 7)
        {
            if (stride_h == 1)
                depthwise_conv_k7s1(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
                                    act_type, num_thread);
            else if (stride_h == 2)
                depthwise_conv_k7s2(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
                                    act_type, num_thread);
        }
    }
    return 0;
}
// Release the padded-input scratch buffer allocated by conv_dw_prerun.
// Safe to call when no buffer was allocated. Always returns 0.
int conv_dw_postrun(struct conv_priv_info* priv_info)
{
    void* pad_buf = priv_info->input_pad;
    if (pad_buf != NULL)
    {
        sys_free(pad_buf);
        priv_info->input_pad = NULL; // defend against double-free on repeated postrun
    }
    return 0;
}
cg.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "globals.h"
#include "randdp.h"
#include "timers.h"
#include <omp.h>
//---------------------------------------------------------------------
#define CACHE_LINE_SIZE_PAD 128
#define INT_PAD_SIZE CACHE_LINE_SIZE_PAD/sizeof(int)
#define DOUBLE_PAD_SIZE CACHE_LINE_SIZE_PAD/sizeof(double)
/* common / main_int_mem / */
static int colidx[NZ];
static int rowstr[NA+1];
static int iv[NA];
static int arow[NA];
static int acol[NAZ];
/* common / main_flt_mem / */
static double aelt[NAZ];
static double a[NZ];
static double x[NA+2];
static double z[NA+2];
static double p[NA+2];
static double q[NA+2];
static double r[NA+2];
/* common / partit_size / */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
/* common /urando/ */
static double amult;
static double tran;
/* common /timers/ */
static logical timeron;
//---------------------------------------------------------------------
//---------------------------------------------------------------------
static void conj_grad(int colidx[],
int rowstr[],
double x[],
double z[],
double a[],
double p[],
double q[],
double r[],
double *rnorm);
static void makea(int n,
int nz,
double a[],
int colidx[],
int rowstr[],
int firstrow,
int lastrow,
int firstcol,
int lastcol,
int arow[],
int acol[][NONZER+1],
double aelt[][NONZER+1],
int iv[]);
static void sparse(double a[],
int colidx[],
int rowstr[],
int n,
int nz,
int nozer,
int arow[],
int acol[][NONZER+1],
double aelt[][NONZER+1],
int firstrow,
int lastrow,
int nzloc[],
double rcond,
double shift);
static void sprnvc(int n, int nz, int nn1, double v[], int iv[]);
static int icnvrt(double x, int ipwr2);
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val);
//---------------------------------------------------------------------
int main(int argc, char *argv[])
{
    // Use one OpenMP thread per available processor for all parallel regions.
    omp_set_num_threads(omp_get_num_procs());
    int i, j, k, it;
    double zeta;
    double rnorm;
    double norm_temp1, norm_temp2;
    double t, mflops, tmax;   // mflops/tmax declared but not used in this build
    //char Class;
    logical verified;
    double zeta_verify_value, epsilon, err;
    char *t_names[T_last];    // NOTE(review): never populated; timer labels unused
    for (i = 0; i < T_last; i++) {
        timer_clear(i);
    }
    timer_start(T_init);
    // Single-partition run: this process owns every row and column.
    firstrow = 0;
    lastrow = NA-1;
    firstcol = 0;
    lastcol = NA-1;
    zeta_verify_value = VALID_RESULT;
    printf("\nCG start...\n\n");
    printf(" Size: %11d\n", NA);
    printf(" Iterations: %5d\n", NITER);
    printf("\n");
    naa = NA;
    nzz = NZ;
    //---------------------------------------------------------------------
    // Initialize random number generator
    //---------------------------------------------------------------------
    tran = 314159265.0;
    amult = 1220703125.0;
    zeta = randlc(&tran, amult);
    //---------------------------------------------------------------------
    // Generate the sparse test matrix (a, colidx, rowstr); acol/aelt are
    // flat workspace arrays reinterpreted as 2-D [n][NONZER+1] via (void*).
    //---------------------------------------------------------------------
    makea(naa, nzz, a, colidx, rowstr,
          firstrow, lastrow, firstcol, lastcol,
          arow,
          (int (*)[NONZER+1])(void*)acol,
          (double (*)[NONZER+1])(void*)aelt,
          iv);
    //---------------------------------------------------------------------
    // Note: as a result of the above call to makea:
    // values of j used in indexing rowstr go from 0 --> lastrow-firstrow
    // values of colidx which are col indexes go from firstcol --> lastcol
    // So:
    // Shift the col index vals from actual (firstcol --> lastcol )
    // to local, i.e., (0 --> lastcol-firstcol)
    //---------------------------------------------------------------------
#pragma omp parallel default(shared) private(i,j,k)
    {
#pragma omp for nowait
        for (j = 0; j < lastrow - firstrow + 1; j++) {
            for (k = rowstr[j]; k < rowstr[j+1]; k++) {
                colidx[k] = colidx[k] - firstcol;
            }
        }
        //---------------------------------------------------------------------
        // set starting vector to (1, 1, .... 1)
        //---------------------------------------------------------------------
#pragma omp for nowait
        for (i = 0; i < NA+1; i++) {
            x[i] = 1.0;
        }
#pragma omp for nowait
        for (j = 0; j < lastcol - firstcol + 1; j++) {
            q[j] = 0.0;
            z[j] = 0.0;
            r[j] = 0.0;
            p[j] = 0.0;
        }
    }
    zeta = 0.0;
    //---------------------------------------------------------------------
    //---->
    // Do one iteration untimed to init all code and data page tables
    //----> (then reinit, start timing, to niter its)
    //---------------------------------------------------------------------
    for (it = 1; it <= 1; it++) {
        //---------------------------------------------------------------------
        // The call to the conjugate gradient routine:
        //---------------------------------------------------------------------
        conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
        //---------------------------------------------------------------------
        // zeta = shift + 1/(x.z)
        // So, first: (x.z)
        // Also, find norm of z
        // So, first: (z.z)
        //---------------------------------------------------------------------
        norm_temp1 = 0.0;
        norm_temp2 = 0.0;
#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp1,norm_temp2)
        for (j = 0; j < lastcol - firstcol + 1; j++) {
            norm_temp1 = norm_temp1 + x[j] * z[j];
            norm_temp2 = norm_temp2 + z[j] * z[j];
        }
        norm_temp2 = 1.0 / sqrt(norm_temp2);
        //---------------------------------------------------------------------
        // Normalize z to obtain x
        //---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j)
        for (j = 0; j < lastcol - firstcol + 1; j++) {
            x[j] = norm_temp2 * z[j];
        }
    } // end of do one iteration untimed
    //---------------------------------------------------------------------
    // set starting vector to (1, 1, .... 1)
    //---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(i)
    for (i = 0; i < NA+1; i++) {
        x[i] = 1.0;
    }
    zeta = 0.0;
    timer_stop(T_init);
    printf(" Initialization time = %15.3f seconds\n", timer_read(T_init));
    timer_start(T_bench);
    //---------------------------------------------------------------------
    //---->
    // Main Iteration for inverse power method
    //---->
    //---------------------------------------------------------------------
    for (it = 1; it <= NITER; it++) {
        //---------------------------------------------------------------------
        // The call to the conjugate gradient routine:
        //---------------------------------------------------------------------
        if (timeron) timer_start(T_conj_grad);
        conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
        if (timeron) timer_stop(T_conj_grad);
        //---------------------------------------------------------------------
        // zeta = shift + 1/(x.z)
        // So, first: (x.z)
        // Also, find norm of z
        // So, first: (z.z)
        //---------------------------------------------------------------------
        norm_temp1 = 0.0;
        norm_temp2 = 0.0;
#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp1,norm_temp2)
        for (j = 0; j < lastcol - firstcol + 1; j++) {
            norm_temp1 = norm_temp1 + x[j]*z[j];
            norm_temp2 = norm_temp2 + z[j]*z[j];
        }
        norm_temp2 = 1.0 / sqrt(norm_temp2);
        zeta = SHIFT + 1.0 / norm_temp1;
        if (it == 1)
            printf("\n iteration ||r|| zeta\n");
        printf(" %5d %20.14E%20.13f\n", it, rnorm, zeta);
        //---------------------------------------------------------------------
        // Normalize z to obtain x
        //---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j)
        for (j = 0; j < lastcol - firstcol + 1; j++) {
            x[j] = norm_temp2 * z[j];
        }
    } // end of main iter inv pow meth
    timer_stop(T_bench);
    //---------------------------------------------------------------------
    // End of timed section
    //---------------------------------------------------------------------
    t = timer_read(T_bench);
    printf("\nComplete...\n");
    // Verify the computed eigenvalue estimate against the reference value
    // within a relative tolerance of 1e-10.
    epsilon = 1.0e-10;
    err = fabs(zeta - zeta_verify_value) / zeta_verify_value;
    if (err <= epsilon) {
        verified = true;
        printf(" VERIFICATION SUCCESSFUL\n");
        printf(" Zeta is %20.13E\n", zeta);
        printf(" Error is %20.13E\n", err);
    } else {
        verified = false;
        printf(" VERIFICATION FAILED\n");
        printf(" Zeta %20.13E\n", zeta);
        printf(" The correct zeta is %20.13E\n", zeta_verify_value);
    }
    printf("\n\nExecution time : %lf seconds\n\n", t);
    return 0;
}
//---------------------------------------------------------------------
// Floating point arrays here are named as in spec discussion of
// CG algorithm
//---------------------------------------------------------------------
// Run cgitmax iterations of the conjugate-gradient method on A.z = x and
// return, via *rnorm, the explicitly recomputed residual norm ||x - A.z||.
// Reads the file-scope partition globals naa, firstrow/lastrow and
// firstcol/lastcol that main() sets up. Loops are hand-unrolled by 8; the
// residue* variables carry the leftover iterations handled serially.
//
// FIXES relative to the previous revision:
//  - rho is now initialized before it is accumulated (it was read
//    uninitialized: undefined behavior).
//  - an explicit barrier is issued before the first rho reduction, because
//    the preceding "nowait" loops write r[] that the reduction reads.
//  - the A.p loop bound was "j <= lastrow-firstrow+1", which indexed
//    rowstr[nrows+1] one past the end of the array; it is now "<" to match
//    the second sparse matrix-vector product below.
//  - a spurious "reduction(+:d)" clause was removed from the z/r update loop.
//  - k was added to the private clause of the final parallel region; it was
//    implicitly shared there, a data race across threads.
static void conj_grad(int colidx[],
                      int rowstr[],
                      double x[],
                      double z[],
                      double a[],
                      double p[],
                      double q[],
                      double r[],
                      double *rnorm)
{
    int j, k, j1, j2, j3;
    int cgit, cgitmax = 25;                // fixed iteration count per call
    double d, sum, rho, rho0, alpha, beta;
    int total_num = naa + 1;               // init-loop length (vectors are naa+1 long)
    int residue = (total_num) % 8;         // leftover before the unrolled init loops
    int rho_num = lastcol - firstcol + 1;  // active length for dot products
    int residue2 = (rho_num) % 8;          // leftover before the unrolled reductions

    //---------------------------------------------------------------------
    // Initialize the CG algorithm:  q = z = 0, r = p = x, rho = r.r
    //---------------------------------------------------------------------
    rho = 0.0;  // FIX: was accumulated below without initialization (UB)
#pragma omp parallel default(shared) private(j, k)
    {
#pragma omp for nowait
        for (k = 0; k < residue; k++) {
            q[k] = 0.0;
            z[k] = 0.0;
            r[k] = x[k];
            p[k] = x[k];
        }
#pragma omp for nowait
        for (j2 = residue; j2 < total_num; j2 = j2 + 8) {
            r[j2]   = x[j2];
            r[j2+1] = x[j2+1];
            r[j2+2] = x[j2+2];
            r[j2+3] = x[j2+3];
            r[j2+4] = x[j2+4];
            r[j2+5] = x[j2+5];
            r[j2+6] = x[j2+6];
            r[j2+7] = x[j2+7];
        }
#pragma omp for nowait
        for (j = residue; j < total_num; j = j + 8) {
            q[j]   = 0.0;
            q[j+1] = 0.0;
            q[j+2] = 0.0;
            q[j+3] = 0.0;
            q[j+4] = 0.0;
            q[j+5] = 0.0;
            q[j+6] = 0.0;
            q[j+7] = 0.0;
        }
#pragma omp for nowait
        for (j1 = residue; j1 < total_num; j1 = j1 + 8) {
            z[j1]   = 0.0;
            z[j1+1] = 0.0;
            z[j1+2] = 0.0;
            z[j1+3] = 0.0;
            z[j1+4] = 0.0;
            z[j1+5] = 0.0;
            z[j1+6] = 0.0;
            z[j1+7] = 0.0;
        }
#pragma omp for nowait
        for (j3 = residue; j3 < total_num; j3 = j3 + 8) {
            p[j3]   = x[j3];
            p[j3+1] = x[j3+1];
            p[j3+2] = x[j3+2];
            p[j3+3] = x[j3+3];
            p[j3+4] = x[j3+4];
            p[j3+5] = x[j3+5];
            p[j3+6] = x[j3+6];
            p[j3+7] = x[j3+7];
        }
        //---------------------------------------------------------------------
        // rho = r.r  (residue part serially, then the unrolled reduction)
        //---------------------------------------------------------------------
        // FIX: the init loops above carry "nowait", so synchronize before any
        // thread starts reading r[].
#pragma omp barrier
#pragma omp single
        for (j = 0; j < residue2; j++) {
            rho = rho + r[j]*r[j];
        }
#pragma omp for reduction(+:rho)
        for (j = residue2; j < rho_num; j += 8) {
            rho = rho + r[j]*r[j]
                      + r[j+1]*r[j+1]
                      + r[j+2]*r[j+2]
                      + r[j+3]*r[j+3]
                      + r[j+4]*r[j+4]
                      + r[j+5]*r[j+5]
                      + r[j+6]*r[j+6]
                      + r[j+7]*r[j+7];
        }
    }
    //---------------------------------------------------------------------
    //---->
    // The conj grad iteration loop
    //---->
    //---------------------------------------------------------------------
    for (cgit = 1; cgit <= cgitmax; cgit++) {
        //---------------------------------------------------------------------
        // q = A.p
        // The partition submatrix-vector multiply, unrolled by 8 per row.
        //---------------------------------------------------------------------
        rho0 = rho;
        d = 0.0;
        rho = 0.0;
#pragma omp parallel default(shared) private(j, k, sum)
        {
            // FIX: bound was "j <= lastrow-firstrow+1", which read
            // rowstr[nrows+1] out of bounds on the last iteration.
#pragma omp for
            for (j = 0; j < lastrow - firstrow + 1; j++) {
                int iresidue;
                int i = rowstr[j];
                iresidue = (rowstr[j+1] - i) % 8;
                sum = 0.0;
                for (k = i; k <= i + iresidue - 1; k++) {
                    sum = sum + a[k] * p[colidx[k]];
                }
                for (k = i + iresidue; k <= rowstr[j+1] - 8; k += 8) {
                    sum = sum + a[k  ] * p[colidx[k  ]]
                              + a[k+1] * p[colidx[k+1]]
                              + a[k+2] * p[colidx[k+2]]
                              + a[k+3] * p[colidx[k+3]]
                              + a[k+4] * p[colidx[k+4]]
                              + a[k+5] * p[colidx[k+5]]
                              + a[k+6] * p[colidx[k+6]]
                              + a[k+7] * p[colidx[k+7]];
                }
                q[j] = sum;
            }
        }
        //---------------------------------------------------------------------
        // Obtain d = p.q  (residue serially under "single", whose closing
        // barrier orders it before the reduction loop)
        //---------------------------------------------------------------------
#pragma omp parallel default(shared) private(j)
        {
#pragma omp single
            for (j = 0; j < residue2; j += 1) {
                d = d + p[j]*q[j];
            }
#pragma omp for reduction(+:d)
            for (j = residue2; j < rho_num; j += 8) {
                d = d + p[j]*q[j]
                      + p[j+1]*q[j+1]
                      + p[j+2]*q[j+2]
                      + p[j+3]*q[j+3]
                      + p[j+4]*q[j+4]
                      + p[j+5]*q[j+5]
                      + p[j+6]*q[j+6]
                      + p[j+7]*q[j+7];
            }
        }
        //---------------------------------------------------------------------
        // Obtain alpha = rho / (p.q)
        //---------------------------------------------------------------------
        alpha = rho0 / d;
        //---------------------------------------------------------------------
        // Obtain z = z + alpha*p
        // and    r = r - alpha*q
        //---------------------------------------------------------------------
#pragma omp parallel default(shared) private(j)
        {
#pragma omp single
            for (j = 0; j < residue2; j += 1) {
                z[j] = z[j] + alpha*p[j];
                r[j] = r[j] - alpha*q[j];
            }
            // FIX: removed a spurious "reduction(+:d)" clause — no reduction
            // variable is accumulated in this loop.
#pragma omp for
            for (j = residue2; j < rho_num; j += 8) {
                z[j]   = z[j]   + alpha*p[j];
                z[j+1] = z[j+1] + alpha*p[j+1];
                z[j+2] = z[j+2] + alpha*p[j+2];
                z[j+3] = z[j+3] + alpha*p[j+3];
                z[j+4] = z[j+4] + alpha*p[j+4];
                z[j+5] = z[j+5] + alpha*p[j+5];
                z[j+6] = z[j+6] + alpha*p[j+6];
                z[j+7] = z[j+7] + alpha*p[j+7];
                r[j]   = r[j]   - alpha*q[j];
                r[j+1] = r[j+1] - alpha*q[j+1];
                r[j+2] = r[j+2] - alpha*q[j+2];
                r[j+3] = r[j+3] - alpha*q[j+3];
                r[j+4] = r[j+4] - alpha*q[j+4];
                r[j+5] = r[j+5] - alpha*q[j+5];
                r[j+6] = r[j+6] - alpha*q[j+6];
                r[j+7] = r[j+7] - alpha*q[j+7];
            }
        }
        //---------------------------------------------------------------------
        // rho = r.r for the updated residual
        //---------------------------------------------------------------------
#pragma omp parallel default(shared) private(j)
        {
#pragma omp single
            for (j = 0; j < residue2; j++) {
                rho = rho + r[j]*r[j];
            }
#pragma omp for reduction(+:rho)
            for (j = residue2; j < rho_num; j += 8) {
                rho = rho + r[j]*r[j]
                          + r[j+1]*r[j+1]
                          + r[j+2]*r[j+2]
                          + r[j+3]*r[j+3]
                          + r[j+4]*r[j+4]
                          + r[j+5]*r[j+5]
                          + r[j+6]*r[j+6]
                          + r[j+7]*r[j+7];
            }
        }
        //---------------------------------------------------------------------
        // Obtain beta:
        //---------------------------------------------------------------------
        beta = rho / rho0;
        //---------------------------------------------------------------------
        // p = r + beta*p
        //---------------------------------------------------------------------
#pragma omp parallel default(shared) private(j)
        {
#pragma omp single
            for (j = 0; j < residue2; j += 1) {
                p[j] = r[j] + beta*p[j];
            }
#pragma omp for
            for (j = residue2; j < rho_num; j += 8) {
                p[j]   = r[j]   + beta*p[j];
                p[j+1] = r[j+1] + beta*p[j+1];
                p[j+2] = r[j+2] + beta*p[j+2];
                p[j+3] = r[j+3] + beta*p[j+3];
                p[j+4] = r[j+4] + beta*p[j+4];
                p[j+5] = r[j+5] + beta*p[j+5];
                p[j+6] = r[j+6] + beta*p[j+6];
                p[j+7] = r[j+7] + beta*p[j+7];
            }
        }
    } // end of do cgit=1,cgitmax
    //---------------------------------------------------------------------
    // Compute residual norm explicitly: ||r|| = ||x - A.z||
    // First, form A.z (stored into r), then sum the squared differences.
    //---------------------------------------------------------------------
    sum = 0.0;
    // FIX: k added to the private clause — it was shared, a data race.
#pragma omp parallel default(shared) private(j, k, d) shared(sum)
    {
#pragma omp for
        for (j = 0; j < rho_num; j++) {
            d = 0.0;
            int iresidue;
            int i = rowstr[j];
            iresidue = (rowstr[j+1] - i) % 8;
            for (k = i; k < i + iresidue; k++) {
                d = d + a[k]*z[colidx[k]];
            }
            for (k = i + iresidue; k < rowstr[j+1]; k += 8) {
                d = d + a[k]*z[colidx[k]]
                      + a[k+1]*z[colidx[k+1]]
                      + a[k+2]*z[colidx[k+2]]
                      + a[k+3]*z[colidx[k+3]]
                      + a[k+4]*z[colidx[k+4]]
                      + a[k+5]*z[colidx[k+5]]
                      + a[k+6]*z[colidx[k+6]]
                      + a[k+7]*z[colidx[k+7]];
            }
            r[j] = d;
        }
        //---------------------------------------------------------------------
        // At this point, r contains A.z
        //---------------------------------------------------------------------
#pragma omp single
        for (j = 0; j < residue2; j += 1) {
            double d_tmp = x[j] - r[j];
            sum = sum + d_tmp*d_tmp;
        }
#pragma omp for reduction(+:sum)
        for (j = residue2; j < rho_num; j += 8) {
            sum = sum + (x[j]-r[j])*(x[j]-r[j])
                      + (x[j+1]-r[j+1])*(x[j+1]-r[j+1])
                      + (x[j+2]-r[j+2])*(x[j+2]-r[j+2])
                      + (x[j+3]-r[j+3])*(x[j+3]-r[j+3])
                      + (x[j+4]-r[j+4])*(x[j+4]-r[j+4])
                      + (x[j+5]-r[j+5])*(x[j+5]-r[j+5])
                      + (x[j+6]-r[j+6])*(x[j+6]-r[j+6])
                      + (x[j+7]-r[j+7])*(x[j+7]-r[j+7]);
        }
    }
    *rnorm = sqrt(sum);
}
//---------------------------------------------------------------------
// generate the test problem for benchmark 6
// makea generates a sparse matrix with a
// prescribed sparsity distribution
//
// parameter type usage
//
// input
//
// n i number of cols/rows of matrix
// nz i nonzeros as declared array size
// rcond r*8 condition number
// shift r*8 main diagonal shift
//
// output
//
// a r*8 array for nonzeros
// colidx i col indices
// rowstr i row pointers
//
// workspace
//
// iv, arow, acol i
// aelt r*8
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Generate the sparse test matrix: for each row, draw a random sparse
// vector (sprnvc), force a 0.5 entry on the diagonal position (vecset),
// record it in arow/acol/aelt, then assemble everything with sparse().
// iv is handed to sparse() as integer workspace.
//---------------------------------------------------------------------
static void makea(int n,
                  int nz,
                  double a[],
                  int colidx[],
                  int rowstr[],
                  int firstrow,
                  int lastrow,
                  int firstcol,
                  int lastcol,
                  int arow[],
                  int acol[][NONZER+1],
                  double aelt[][NONZER+1],
                  int iv[])
{
    int row, elt, nzv, nn1;
    int ivc[NONZER+1];
    double vc[NONZER+1];

    // nn1: the smallest power of two (>= 2) not less than n.
    nn1 = 2;
    while (nn1 < n) {
        nn1 = 2 * nn1;
    }

    // Generate one random sparse row at a time and save the triples for
    // the later assembly in sparse().
    for (row = 0; row < n; row++) {
        nzv = NONZER;
        sprnvc(n, nzv, nn1, vc, ivc);
        vecset(n, vc, ivc, &nzv, row + 1, 0.5); // pin the diagonal entry
        arow[row] = nzv;
        for (elt = 0; elt < nzv; elt++) {
            acol[row][elt] = ivc[elt] - 1; // convert to 0-based column index
            aelt[row][elt] = vc[elt];
        }
    }

    // Assemble the final CSR matrix, summing duplicate entries
    // (iv is used as workspace).
    sparse(a, colidx, rowstr, n, nz, NONZER, arow, acol,
           aelt, firstrow, lastrow,
           iv, RCOND, SHIFT);
}
//---------------------------------------------------------------------
// rows range from firstrow to lastrow
// the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Assemble a CSR matrix (a, colidx, rowstr) from the per-row triple
// lists (arow, acol, aelt) produced by makea, summing duplicates.
// rows range from firstrow to lastrow; the rowstr pointers are defined
// for nrows = lastrow-firstrow+1 values. nzloc is integer workspace of
// length nrows (duplicate counts per row, later prefix-summed).
// rcond is added on the diagonal (minus shift) to bound the smallest
// eigenvalue from below.
//---------------------------------------------------------------------
static void sparse(double a[],
                   int colidx[],
                   int rowstr[],
                   int n,
                   int nz,
                   int nozer,
                   int arow[],
                   int acol[][NONZER+1],
                   double aelt[][NONZER+1],
                   int firstrow,
                   int lastrow,
                   int nzloc[],
                   double rcond,
                   double shift)
{
    int nrows;
    //---------------------------------------------------
    // generate a sparse matrix from a list of
    // [col, row, element] triples
    //---------------------------------------------------
    int i, j, j1, j2, nza, k, kk, nzrow, jcol;
    double size, scale, ratio, va;
    logical cont40;
    //---------------------------------------------------------------------
    // how many rows of result
    //---------------------------------------------------------------------
    nrows = lastrow - firstrow + 1;
    //---------------------------------------------------------------------
    // ...count the number of triples in each row (upper bound: each of the
    // arow[i] outer entries contributes arow[i] products into row j)
    //---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j)
    for (j = 0; j < nrows+1; j++) {
        rowstr[j] = 0;
    }
    for (i = 0; i < n; i++) {
        for (nza = 0; nza < arow[i]; nza++) {
            j = acol[i][nza] + 1; // counts accumulate one slot ahead for the prefix sum
            rowstr[j] = rowstr[j] + arow[i];
        }
    }
    // Prefix-sum the counts into row start offsets.
    rowstr[0] = 0;
    for (j = 1; j < nrows+1; j++) {
        rowstr[j] = rowstr[j] + rowstr[j-1];
    }
    nza = rowstr[nrows] - 1;
    //---------------------------------------------------------------------
    // ... rowstr(j) now is the location of the first nonzero
    // of row j of a
    //---------------------------------------------------------------------
    if (nza > nz) {
        printf("Space for matrix elements exceeded in sparse\n");
        printf("nza, nzmax = %d, %d\n", nza, nz);
        exit(EXIT_FAILURE);
    }
    //---------------------------------------------------------------------
    // ... preload data pages: zero the values, mark all slots empty (-1),
    // and clear the per-row duplicate counters
    //---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j, k)
    for (j = 0; j < nrows; j++) {
        for (k = rowstr[j]; k < rowstr[j+1]; k++) {
            a[k] = 0.0;
            colidx[k] = -1;
        }
        nzloc[j] = 0;
    }
    //---------------------------------------------------------------------
    // ... generate actual values by summing duplicates; the outer-product
    // terms are scaled by a geometric factor so the spectrum spans rcond
    //---------------------------------------------------------------------
    size = 1.0;
    ratio = pow(rcond, (1.0 / (double)(n)));
    for (i = 0; i < n; i++) {
        for (nza = 0; nza < arow[i]; nza++) {
            j = acol[i][nza];
            scale = size * aelt[i][nza];
            for (nzrow = 0; nzrow < arow[i]; nzrow++) {
                jcol = acol[i][nzrow];
                va = aelt[i][nzrow] * scale;
                //--------------------------------------------------------------------
                // ... add the identity * rcond to the generated matrix to bound
                // the smallest eigenvalue from below by rcond
                //--------------------------------------------------------------------
                if (jcol == j && j == i) {
                    va = va + rcond - shift;
                }
                // Walk row j keeping colidx sorted; cont40 records whether a
                // slot for jcol was found (insert, fresh slot, or duplicate).
                cont40 = false;
                for (k = rowstr[j]; k < rowstr[j+1]; k++) {
                    if (colidx[k] > jcol) {
                        //----------------------------------------------------------------
                        // ... insert colidx here orderly: shift the used tail
                        // of the row right by one to open slot k
                        //----------------------------------------------------------------
                        for (kk = rowstr[j+1]-2; kk >= k; kk--) {
                            if (colidx[kk] > -1) {
                                a[kk+1] = a[kk];
                                colidx[kk+1] = colidx[kk];
                            }
                        }
                        colidx[k] = jcol;
                        a[k] = 0.0;
                        cont40 = true;
                        break;
                    } else if (colidx[k] == -1) {
                        // first free slot past the used entries
                        colidx[k] = jcol;
                        cont40 = true;
                        break;
                    } else if (colidx[k] == jcol) {
                        //--------------------------------------------------------------
                        // ... mark the duplicated entry (value is summed below)
                        //--------------------------------------------------------------
                        nzloc[j] = nzloc[j] + 1;
                        cont40 = true;
                        break;
                    }
                }
                if (cont40 == false) {
                    printf("internal error in sparse: i=%d\n", i);
                    exit(EXIT_FAILURE);
                }
                a[k] = a[k] + va; // accumulate into slot k found above
            }
        }
        size = size * ratio;
    }
    //---------------------------------------------------------------------
    // ... remove empty entries and generate final results: prefix-sum the
    // duplicate counts, then compact each row leftward by its offset
    //---------------------------------------------------------------------
    for (j = 1; j < nrows; j++) {
        nzloc[j] = nzloc[j] + nzloc[j-1];
    }
    for (j = 0; j < nrows; j++) {
        if (j > 0) {
            j1 = rowstr[j] - nzloc[j-1];
        } else {
            j1 = 0;
        }
        j2 = rowstr[j+1] - nzloc[j];
        nza = rowstr[j];
        for (k = j1; k < j2; k++) {
            a[k] = a[nza];
            colidx[k] = colidx[nza];
            nza = nza + 1;
        }
    }
    // Shrink the row pointers to account for the removed duplicates.
#pragma omp parallel for default(shared) private(j)
    for (j = 1; j < nrows+1; j++) {
        rowstr[j] = rowstr[j] - nzloc[j-1];
    }
    nza = rowstr[nrows] - 1;
}
//---------------------------------------------------------------------
// generate a sparse n-vector (v, iv)
// having nzv nonzeros
//
// mark(i) is set to 1 if position i is nonzero.
// mark is all zero on entry and is reset to all zero before exit
// this corrects a performance bug found by John G. Lewis, caused by
// reinitialization of mark on every one of the n calls to sprnvc
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Generate a sparse n-vector (v, iv) with nz nonzeros at distinct
// random 1-based positions. nn1 is a power of two >= n used to map a
// random value in (0,1) to an integer position; out-of-range draws and
// repeated positions are rejected and redrawn. Advances the file-scope
// random state (tran, amult).
//---------------------------------------------------------------------
static void sprnvc(int n, int nz, int nn1, double v[], int iv[])
{
    int count = 0;
    while (count < nz) {
        double value = randlc(&tran, amult);
        //---------------------------------------------------------------------
        // generate an integer between 1 and n in a portable manner
        //---------------------------------------------------------------------
        double pos_draw = randlc(&tran, amult);
        int idx = icnvrt(pos_draw, nn1) + 1;
        if (idx > n)
            continue; // rejection sampling: nn1 may exceed n
        //---------------------------------------------------------------------
        // was this integer generated already?
        //---------------------------------------------------------------------
        int duplicate = 0;
        for (int ii = 0; ii < count; ii++) {
            if (iv[ii] == idx) {
                duplicate = 1;
                break;
            }
        }
        if (duplicate)
            continue;
        v[count] = value;
        iv[count] = idx;
        count = count + 1;
    }
}
//---------------------------------------------------------------------
// scale a double precision number x in (0,1) by a power of 2 and chop it
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Scale a double precision number x in (0,1) by the power of two ipwr2
// and truncate toward zero.
//---------------------------------------------------------------------
static int icnvrt(double x, int ipwr2)
{
    double scaled = ipwr2 * x;
    return (int)scaled;
}
//---------------------------------------------------------------------
// set ith element of sparse vector (v, iv) with
// nzv nonzeros to val
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Set the ith element of the sparse vector (v, iv) with nzv nonzeros
// to val: overwrite every existing entry whose position is i, or append
// a new (i, val) entry (incrementing *nzv) when i is not present.
//---------------------------------------------------------------------
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val)
{
    int found = 0;
    for (int k = 0; k < *nzv; k++) {
        if (iv[k] == i) {
            v[k] = val;
            found = 1;
        }
    }
    if (!found) {
        v[*nzv] = val;
        iv[*nzv] = i;
        *nzv = *nzv + 1;
    }
}
|
cp-tree.h | /* Definitions for C++ parsing and type checking.
Copyright (C) 1987-2013 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_CP_TREE_H
#define GCC_CP_TREE_H
#include "ggc.h"
#include "function.h"
#include "hashtab.h"
#include "vec.h"
/* In order for the format checking to accept the C++ front end
diagnostic framework extensions, you must include this file before
diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE
in c-common.h. */
#undef GCC_DIAG_STYLE
#define GCC_DIAG_STYLE __gcc_cxxdiag__
#if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H)
#error \
In order for the format checking to accept the C++ front end diagnostic \
framework extensions, you must include this file before diagnostic-core.h and \
c-common.h, not after.
#endif
#include "c-family/c-common.h"
#include "diagnostic.h"
#include "name-lookup.h"
/* Usage of TREE_LANG_FLAG_?:
0: IDENTIFIER_MARKED (IDENTIFIER_NODEs)
NEW_EXPR_USE_GLOBAL (in NEW_EXPR).
DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR).
COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR).
TREE_INDIRECT_USING (in NAMESPACE_DECL).
CLEANUP_P (in TRY_BLOCK)
AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR)
PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF)
PAREN_STRING_LITERAL (in STRING_CST)
DECL_GNU_TLS_P (in VAR_DECL)
KOENIG_LOOKUP_P (in CALL_EXPR)
STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST).
EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT)
STMT_EXPR_NO_SCOPE (in STMT_EXPR)
BIND_EXPR_TRY_BLOCK (in BIND_EXPR)
TYPENAME_IS_ENUM_P (in TYPENAME_TYPE)
OMP_FOR_GIMPLIFYING_P (in OMP_FOR)
BASELINK_QUALIFIED_P (in BASELINK)
TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR)
TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX)
ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute)
CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR)
LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR)
DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE)
VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR)
DECL_OVERRIDE_P (in FUNCTION_DECL)
IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR)
TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR)
CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR)
OVL_ARG_DEPENDENT (in OVERLOAD)
PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION)
TINFO_RECHECK_ACCESS_P (in TEMPLATE_INFO)
SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR)
1: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE)
TI_PENDING_TEMPLATE_FLAG.
TEMPLATE_PARMS_FOR_INLINE.
DELETE_EXPR_USE_VEC (in DELETE_EXPR).
(TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out).
ICS_ELLIPSIS_FLAG (in _CONV)
DECL_INITIALIZED_P (in VAR_DECL)
TYPENAME_IS_CLASS_P (in TYPENAME_TYPE)
STMT_IS_FULL_EXPR_P (in _STMT)
TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR)
LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR)
DECL_FINAL_P (in FUNCTION_DECL)
QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF)
2: IDENTIFIER_OPNAME_P (in IDENTIFIER_NODE)
ICS_THIS_FLAG (in _CONV)
DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL)
STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST)
      TYPENAME_IS_RESOLVING_P (in TYPENAME_TYPE)
TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR)
FNDECL_USED_AUTO (in FUNCTION_DECL)
3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out).
ICS_BAD_FLAG (in _CONV)
FN_TRY_BLOCK_P (in TRY_BLOCK)
IDENTIFIER_CTOR_OR_DTOR_P (in IDENTIFIER_NODE)
BIND_EXPR_BODY_BLOCK (in BIND_EXPR)
DECL_NON_TRIVIALLY_INITIALIZED_P (in VAR_DECL)
4: TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR,
or FIELD_DECL).
IDENTIFIER_TYPENAME_P (in IDENTIFIER_NODE)
DECL_TINFO_P (in VAR_DECL)
FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
5: C_IS_RESERVED_WORD (in IDENTIFIER_NODE)
DECL_VTABLE_OR_VTT_P (in VAR_DECL)
FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE)
DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL)
TYPE_MARKED_P (in _TYPE)
Usage of TYPE_LANG_FLAG_?:
0: TYPE_DEPENDENT_P
1: TYPE_HAS_USER_CONSTRUCTOR.
2: unused
3: TYPE_FOR_JAVA.
4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR
5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE)
ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE)
6: TYPE_DEPENDENT_P_VALID
Usage of DECL_LANG_FLAG_?:
0: DECL_ERROR_REPORTED (in VAR_DECL).
DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL)
DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL)
DECL_MUTABLE_P (in FIELD_DECL)
DECL_DEPENDENT_P (in USING_DECL)
1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL).
DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL)
DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL)
FUNCTION_PARAMETER_PACK_P (in PARM_DECL)
USING_DECL_TYPENAME_P (in USING_DECL)
2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL).
DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL)
3: DECL_IN_AGGR_P.
4: DECL_C_BIT_FIELD (in a FIELD_DECL)
DECL_ANON_UNION_VAR_P (in a VAR_DECL)
DECL_SELF_REFERENCE_P (in a TYPE_DECL)
DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL)
5: DECL_INTERFACE_KNOWN.
6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL).
DECL_FIELD_IS_BASE (in FIELD_DECL)
TYPE_DECL_ALIAS_P (in TYPE_DECL)
7: DECL_DEAD_FOR_LOCAL (in VAR_DECL).
DECL_THUNK_P (in a member FUNCTION_DECL)
DECL_NORMAL_CAPTURE_P (in FIELD_DECL)
8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL)
Usage of language-independent fields in a language-dependent manner:
TYPE_ALIAS_SET
This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so
forth as a substitute for the mark bits provided in `lang_type'.
At present, only the six low-order bits are used.
TYPE_LANG_SLOT_1
For an ENUMERAL_TYPE, this is ENUM_TEMPLATE_INFO.
For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS
BINFO_VIRTUALS
For a binfo, this is a TREE_LIST. There is an entry for each
virtual function declared either in BINFO or its direct and
indirect primary bases.
The BV_DELTA of each node gives the amount by which to adjust the
`this' pointer when calling the function. If the method is an
overridden version of a base class method, then it is assumed
that, prior to adjustment, the this pointer points to an object
of the base class.
The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable
index of the vcall offset for this entry.
The BV_FN is the declaration for the virtual function itself.
If BV_LOST_PRIMARY is set, it means that this entry is for a lost
primary virtual base and can be left null in the vtable.
BINFO_VTABLE
This is an expression with POINTER_TYPE that gives the value
to which the vptr should be initialized. Use get_vtbl_decl_for_binfo
to extract the VAR_DECL for the complete vtable.
DECL_VINDEX
This field is NULL for a non-virtual function. For a virtual
function, it is eventually set to an INTEGER_CST indicating the
index in the vtable at which this function can be found. When
a virtual function is declared, but before it is known what
function is overridden, this field is the error_mark_node.
Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is
the virtual function this one overrides, and whose TREE_CHAIN is
the old DECL_VINDEX. */
/* Language-specific tree checkers. */
#define VAR_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL)
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL)
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == FUNCTION_DECL)
#define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL)
#define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \
TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define THUNK_FUNCTION_CHECK(NODE) __extension__ \
({ __typeof (NODE) const __t = (NODE); \
if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \
|| !__t->decl_common.lang_specific->u.fn.thunk_p) \
tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \
__t; })
#else
#define THUNK_FUNCTION_CHECK(NODE) (NODE)
#endif
/* Language-dependent contents of an identifier. */
struct GTY(()) lang_identifier {
struct c_common_identifier c_common;
/* Binding of this name in namespace scope; read via
   IDENTIFIER_NAMESPACE_BINDINGS below.  */
cxx_binding *namespace_bindings;
/* Innermost binding stack for the identifier; read via
   IDENTIFIER_BINDING below.  */
cxx_binding *bindings;
/* Class template info for this name; read via IDENTIFIER_TEMPLATE.  */
tree class_template_info;
/* Label bound to this name; read via IDENTIFIER_LABEL_VALUE.  */
tree label_value;
};
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_5 (ID)
#define LANG_IDENTIFIER_CAST(NODE) \
((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE))
/* Data for a TEMPLATE_PARM_INDEX node (stored in lang_tree_node under
   the TS_CP_TPI tag below).  Identifies one template parameter.
   NOTE(review): index/level/orig_level semantics inferred from names
   only -- no accessor is visible in this chunk; confirm at uses.  */
struct GTY(()) template_parm_index_s {
struct tree_common common;
int index;
int level;
int orig_level;
/* The parameter's declaration.  */
tree decl;
};
typedef struct template_parm_index_s template_parm_index;
/* A pointer-to-member constant (TS_CP_PTRMEM in lang_tree_node).  */
struct GTY(()) ptrmem_cst {
struct tree_common common;
/* The member designated by the constant -- presumably a *_DECL;
   confirm at uses.  */
tree member;
};
typedef struct ptrmem_cst * ptrmem_cst_t;
/* Read/write the binding of NODE in the global namespace, or in the
   namespace currently open, via the name-lookup routines.  */
#define IDENTIFIER_GLOBAL_VALUE(NODE) \
namespace_binding ((NODE), global_namespace)
#define SET_IDENTIFIER_GLOBAL_VALUE(NODE, VAL) \
set_namespace_binding ((NODE), global_namespace, (VAL))
#define IDENTIFIER_NAMESPACE_VALUE(NODE) \
namespace_binding ((NODE), current_namespace)
#define SET_IDENTIFIER_NAMESPACE_VALUE(NODE, VAL) \
set_namespace_binding ((NODE), current_namespace, (VAL))
#define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE))
#define BIND_EXPR_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE))
/* Used to mark the block around the member initializers and cleanups. */
#define BIND_EXPR_BODY_BLOCK(NODE) \
TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE))
#define FUNCTION_NEEDS_BODY_BLOCK(NODE) \
(DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \
|| LAMBDA_FUNCTION_P (NODE))
#define STATEMENT_LIST_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE))
#define STATEMENT_LIST_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE))
/* Nonzero if this statement should be considered a full-expression,
i.e., if temporaries created during this statement should have
their destructors run at the end of this statement. */
#define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE))
/* Marks the result of a statement expression. */
#define EXPR_STMT_STMT_EXPR_RESULT(NODE) \
TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE))
/* Nonzero if this statement-expression does not have an associated scope. */
#define STMT_EXPR_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE))
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual
sense of `same'. */
#define same_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_STRICT)
/* Returns nonzero iff NODE is a declaration for the global function
`main'. */
#define DECL_MAIN_P(NODE) \
(DECL_EXTERN_C_FUNCTION_P (NODE) \
&& DECL_NAME (NODE) != NULL_TREE \
&& MAIN_NAME_P (DECL_NAME (NODE)) \
&& flag_hosted)
/* The overloaded FUNCTION_DECL. */
#define OVL_FUNCTION(NODE) \
(((struct tree_overload*)OVERLOAD_CHECK (NODE))->function)
#define OVL_CHAIN(NODE) TREE_CHAIN (NODE)
/* Polymorphic access to FUNCTION and CHAIN. */
#define OVL_CURRENT(NODE) \
((TREE_CODE (NODE) == OVERLOAD) ? OVL_FUNCTION (NODE) : (NODE))
#define OVL_NEXT(NODE) \
((TREE_CODE (NODE) == OVERLOAD) ? TREE_CHAIN (NODE) : NULL_TREE)
/* If set, this was imported in a using declaration.
This is not to confuse with being used somewhere, which
is not important for this node. */
#define OVL_USED(NODE) TREE_USED (NODE)
/* If set, this OVERLOAD was created for argument-dependent lookup
and can be freed afterward. */
#define OVL_ARG_DEPENDENT(NODE) TREE_LANG_FLAG_0 (OVERLOAD_CHECK (NODE))
/* An OVERLOAD node: one link in a chain of overloaded functions.
   FUNCTION is read via OVL_FUNCTION above; the chain continues
   through TREE_CHAIN (see OVL_CHAIN / OVL_NEXT).  */
struct GTY(()) tree_overload {
struct tree_common common;
tree function;
};
/* Returns true iff NODE is a BASELINK. */
#define BASELINK_P(NODE) \
(TREE_CODE (NODE) == BASELINK)
/* The BINFO indicating the base in which lookup found the
BASELINK_FUNCTIONS. */
#define BASELINK_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo)
/* The functions referred to by the BASELINK; either a FUNCTION_DECL,
a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. */
#define BASELINK_FUNCTIONS(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->functions)
/* The BINFO in which the search for the functions indicated by this baselink
began. This base is used to determine the accessibility of functions
selected by overload resolution. */
#define BASELINK_ACCESS_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo)
/* For a type-conversion operator, the BASELINK_OPTYPE indicates the type
to which the conversion should occur. This value is important if
the BASELINK_FUNCTIONS include a template conversion operator --
the BASELINK_OPTYPE can be used to determine what type the user
requested. */
#define BASELINK_OPTYPE(NODE) \
(TREE_CHAIN (BASELINK_CHECK (NODE)))
/* Nonzero if this baselink was from a qualified lookup. */
#define BASELINK_QUALIFIED_P(NODE) \
TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE))
/* A BASELINK node; fields are accessed through the BASELINK_* macros
   above.  */
struct GTY(()) tree_baselink {
struct tree_common common;
/* The base binfo in which lookup found the functions.  */
tree binfo;
/* The functions found: FUNCTION_DECL, TEMPLATE_DECL, OVERLOAD, or
   TEMPLATE_ID_EXPR.  */
tree functions;
/* The binfo where the search began; used for access checking.  */
tree access_binfo;
};
/* The different kinds of ids that we encounter. */
/* Classification of an id-expression encountered during parsing.  */
typedef enum cp_id_kind
{
/* Not an id at all. */
CP_ID_KIND_NONE,
/* An unqualified-id that is not a template-id. */
CP_ID_KIND_UNQUALIFIED,
/* An unqualified-id that is a dependent name. */
CP_ID_KIND_UNQUALIFIED_DEPENDENT,
/* An unqualified template-id. */
CP_ID_KIND_TEMPLATE_ID,
/* A qualified-id. */
CP_ID_KIND_QUALIFIED
} cp_id_kind;
/* The various kinds of C++0x warnings we encounter. */
/* NOTE(review): each value presumably selects the feature name used in
   the "only available with -std=c++11" style diagnostics -- confirm at
   the uses of this enum.  */
typedef enum cpp0x_warn_str
{
/* extended initializer lists */
CPP0X_INITIALIZER_LISTS,
/* explicit conversion operators */
CPP0X_EXPLICIT_CONVERSION,
/* variadic templates */
CPP0X_VARIADIC_TEMPLATES,
/* lambda expressions */
CPP0X_LAMBDA_EXPR,
/* C++0x auto */
CPP0X_AUTO,
/* scoped enums */
CPP0X_SCOPED_ENUMS,
/* defaulted and deleted functions */
CPP0X_DEFAULTED_DELETED,
/* inline namespaces */
CPP0X_INLINE_NAMESPACES,
/* override controls, override/final */
CPP0X_OVERRIDE_CONTROLS,
/* non-static data member initializers */
CPP0X_NSDMI,
/* user defined literals */
CPP0X_USER_DEFINED_LITERALS,
/* delegating constructors */
CPP0X_DELEGATING_CTORS,
/* inheriting constructors */
CPP0X_INHERITING_CTORS,
/* C++11 attributes */
CPP0X_ATTRIBUTES,
/* ref-qualified member functions */
CPP0X_REF_QUALIFIER
} cpp0x_warn_str;
/* The various kinds of operation used by composite_pointer_type. */
/* Which operation asked for a composite pointer type; selects the
   diagnostic wording in composite_pointer_type.  */
typedef enum composite_pointer_operation
{
/* comparison */
CPO_COMPARISON,
/* conversion */
CPO_CONVERSION,
/* conditional expression */
CPO_CONDITIONAL_EXPR
} composite_pointer_operation;
/* Possible cases of expression list used by build_x_compound_expr_from_list. */
/* Context of the expression list passed to
   build_x_compound_expr_from_list.  */
typedef enum expr_list_kind {
ELK_INIT, /* initializer */
ELK_MEM_INIT, /* member initializer */
ELK_FUNC_CAST /* functional cast */
} expr_list_kind;
/* Possible cases of implicit bad rhs conversions. */
/* Context of an ill-formed implicit conversion of a right-hand side;
   selects diagnostic wording.  */
typedef enum impl_conv_rhs {
ICR_DEFAULT_ARGUMENT, /* default argument */
ICR_CONVERTING, /* converting */
ICR_INIT, /* initialization */
ICR_ARGPASS, /* argument passing */
ICR_RETURN, /* return */
ICR_ASSIGN /* assignment */
} impl_conv_rhs;
/* Possible cases of implicit or explicit bad conversions to void. */
/* Context of an ill-formed conversion to void; selects diagnostic
   wording.  */
typedef enum impl_conv_void {
ICV_CAST, /* (explicit) conversion to void */
ICV_SECOND_OF_COND, /* second operand of conditional expression */
ICV_THIRD_OF_COND, /* third operand of conditional expression */
ICV_RIGHT_OF_COMMA, /* right operand of comma operator */
ICV_LEFT_OF_COMMA, /* left operand of comma operator */
ICV_STATEMENT, /* statement */
ICV_THIRD_IN_FOR /* for increment expression */
} impl_conv_void;
/* Macros for access to language-specific slots in an identifier. */
#define IDENTIFIER_NAMESPACE_BINDINGS(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->namespace_bindings)
#define IDENTIFIER_TEMPLATE(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->class_template_info)
/* The IDENTIFIER_BINDING is the innermost cxx_binding for the
identifier. It's PREVIOUS is the next outermost binding. Each
VALUE field is a DECL for the associated declaration. Thus,
name lookup consists simply of pulling off the node at the front
of the list (modulo oddities for looking up the names of types,
and such.) You can use SCOPE field to determine the scope
that bound the name. */
#define IDENTIFIER_BINDING(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->bindings)
/* TREE_TYPE only indicates on local and class scope the current
type. For namespace scope, the presence of a type in any namespace
is indicated with global_type_node, and the real type behind must
be found through lookup. */
#define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE)
#define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE)
#define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE))
#define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0)
#define IDENTIFIER_LABEL_VALUE(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->label_value)
#define SET_IDENTIFIER_LABEL_VALUE(NODE, VALUE) \
IDENTIFIER_LABEL_VALUE (NODE) = (VALUE)
/* Nonzero if this identifier is used as a virtual function name somewhere
(optimizes searches). */
#define IDENTIFIER_VIRTUAL_P(NODE) TREE_LANG_FLAG_1 (NODE)
/* Nonzero if this identifier is the prefix for a mangled C++ operator
name. */
#define IDENTIFIER_OPNAME_P(NODE) TREE_LANG_FLAG_2 (NODE)
/* Nonzero if this identifier is the name of a type-conversion
operator. */
#define IDENTIFIER_TYPENAME_P(NODE) \
TREE_LANG_FLAG_4 (NODE)
/* Nonzero if this identifier is the name of a constructor or
destructor. */
#define IDENTIFIER_CTOR_OR_DTOR_P(NODE) \
TREE_LANG_FLAG_3 (NODE)
/* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague
linkage which the prelinker has assigned to this translation
unit. */
#define IDENTIFIER_REPO_CHOSEN(NAME) \
(TREE_LANG_FLAG_6 (NAME))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly)
/* The tokens stored in the default argument. */
#define DEFARG_TOKENS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens)
#define DEFARG_INSTANTIATIONS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations)
/* A DEFAULT_ARG node: a default argument whose parsing is deferred.
   Accessed via DEFARG_TOKENS / DEFARG_INSTANTIATIONS above.  */
struct GTY (()) tree_default_arg {
struct tree_common common;
/* The raw tokens of the default argument, saved for later parsing.  */
struct cp_token_cache *tokens;
/* Instantiations made from this default argument.  */
vec<tree, va_gc> *instantiations;
};
#define DEFERRED_NOEXCEPT_PATTERN(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern)
#define DEFERRED_NOEXCEPT_ARGS(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args)
#define DEFERRED_NOEXCEPT_SPEC_P(NODE) \
((NODE) && (TREE_PURPOSE (NODE)) \
&& (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT \
|| is_overloaded_fn (TREE_PURPOSE (NODE))))
/* A DEFERRED_NOEXCEPT node; see DEFERRED_NOEXCEPT_PATTERN/_ARGS above.
   NOTE(review): pattern/args presumably hold the uninstantiated
   noexcept expression and its template arguments -- confirm at uses.  */
struct GTY (()) tree_deferred_noexcept {
struct tree_base base;
tree pattern;
tree args;
};
/* The condition associated with the static assertion. This must be
an integral constant expression. */
#define STATIC_ASSERT_CONDITION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition)
/* The message associated with the static assertion. This must be a
string constant, which will be emitted as an error message when the
static assert condition is false. */
#define STATIC_ASSERT_MESSAGE(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message)
/* Source location information for a static assertion. */
#define STATIC_ASSERT_SOURCE_LOCATION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location)
/* A STATIC_ASSERT node; fields are documented at the STATIC_ASSERT_*
   accessors above (condition, message, source location).  */
struct GTY (()) tree_static_assert {
struct tree_common common;
tree condition;
tree message;
location_t location;
};
/* An ARGUMENT_PACK_SELECT node -- NOTE(review): appears to select
   element INDEX of ARGUMENT_PACK; no accessor is visible in this
   chunk, inferred from field names.  */
struct GTY (()) tree_argument_pack_select {
struct tree_common common;
tree argument_pack;
int index;
};
/* The different kinds of traits that we encounter. */
/* One value per type-trait intrinsic; stored in a TRAIT_EXPR's kind
   field (see TRAIT_EXPR_KIND below).  */
typedef enum cp_trait_kind
{
CPTK_BASES,
CPTK_DIRECT_BASES,
CPTK_HAS_NOTHROW_ASSIGN,
CPTK_HAS_NOTHROW_CONSTRUCTOR,
CPTK_HAS_NOTHROW_COPY,
CPTK_HAS_TRIVIAL_ASSIGN,
CPTK_HAS_TRIVIAL_CONSTRUCTOR,
CPTK_HAS_TRIVIAL_COPY,
CPTK_HAS_TRIVIAL_DESTRUCTOR,
CPTK_HAS_VIRTUAL_DESTRUCTOR,
CPTK_IS_ABSTRACT,
CPTK_IS_BASE_OF,
CPTK_IS_CLASS,
CPTK_IS_CONVERTIBLE_TO,
CPTK_IS_EMPTY,
CPTK_IS_ENUM,
CPTK_IS_FINAL,
CPTK_IS_LITERAL_TYPE,
CPTK_IS_POD,
CPTK_IS_POLYMORPHIC,
CPTK_IS_STD_LAYOUT,
CPTK_IS_TRIVIAL,
CPTK_IS_UNION,
CPTK_UNDERLYING_TYPE
} cp_trait_kind;
/* The types that we are processing. */
#define TRAIT_EXPR_TYPE1(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1)
#define TRAIT_EXPR_TYPE2(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2)
/* The specific trait that we are processing. */
#define TRAIT_EXPR_KIND(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind)
/* A TRAIT_EXPR node; fields are documented at the TRAIT_EXPR_TYPE1/
   TYPE2/KIND accessors above.  */
struct GTY (()) tree_trait_expr {
struct tree_common common;
tree type1;
tree type2;
enum cp_trait_kind kind;
};
/* Based off of TYPE_ANONYMOUS_P. */
#define LAMBDA_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_LAMBDA_EXPR (NODE))
/* Test if FUNCTION_DECL is a lambda function. */
#define LAMBDA_FUNCTION_P(FNDECL) \
(DECL_OVERLOADED_OPERATOR_P (FNDECL) == CALL_EXPR \
&& LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL)))
/* Default capture mode of a lambda-introducer: none, by copy ([=]),
   or by reference ([&]); stored via
   LAMBDA_EXPR_DEFAULT_CAPTURE_MODE below.  */
enum cp_lambda_default_capture_mode_type {
CPLD_NONE,
CPLD_COPY,
CPLD_REFERENCE
};
/* The method of default capture, if any. */
#define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode)
/* The capture-list, including `this'. Each capture is stored as a FIELD_DECL
* so that the name, type, and field are all together, whether or not it has
* been added to the lambda's class type.
TREE_LIST:
TREE_PURPOSE: The FIELD_DECL for this capture.
TREE_VALUE: The initializer. This is part of a GNU extension. */
#define LAMBDA_EXPR_CAPTURE_LIST(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list)
/* During parsing of the lambda-introducer, the node in the capture-list
that holds the 'this' capture. During parsing of the body, the
capture proxy for that node. */
#define LAMBDA_EXPR_THIS_CAPTURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture)
/* Predicate tracking whether `this' is in the effective capture set. */
#define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \
LAMBDA_EXPR_THIS_CAPTURE(NODE)
/* Predicate tracking whether the lambda was declared 'mutable'. */
#define LAMBDA_EXPR_MUTABLE_P(NODE) \
TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE))
/* The return type in the expression.
* NULL_TREE indicates that none was specified. */
#define LAMBDA_EXPR_RETURN_TYPE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->return_type)
/* The source location of the lambda. */
#define LAMBDA_EXPR_LOCATION(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus)
/* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL,
FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */
#define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope)
/* If EXTRA_SCOPE, this is the number of the lambda within that scope. */
#define LAMBDA_EXPR_DISCRIMINATOR(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator)
/* During parsing of the lambda, a vector of capture proxies which need
to be pushed once we're done processing a nested lambda. */
#define LAMBDA_EXPR_PENDING_PROXIES(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies)
/* The closure type of the lambda. Note that the TREE_TYPE of a
LAMBDA_EXPR is always NULL_TREE, because we need to instantiate the
LAMBDA_EXPR in order to instantiate the type. */
#define LAMBDA_EXPR_CLOSURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->closure)
/* A LAMBDA_EXPR node; each field is documented at its LAMBDA_EXPR_*
   accessor above.  */
struct GTY (()) tree_lambda_expr
{
struct tree_typed typed;
/* TREE_LIST of captures: FIELD_DECL (purpose) and initializer (value).  */
tree capture_list;
/* The 'this' capture node, later its capture proxy.  */
tree this_capture;
/* Explicit return type, or NULL_TREE if none was specified.  */
tree return_type;
/* Mangling scope; NULL_TREE means no linkage.  */
tree extra_scope;
/* The closure class type (TREE_TYPE of the LAMBDA_EXPR stays NULL).  */
tree closure;
/* Capture proxies pending while a nested lambda is processed.  */
vec<tree, va_gc> *pending_proxies;
/* Source location of the lambda.  */
location_t locus;
enum cp_lambda_default_capture_mode_type default_capture_mode;
/* Number of this lambda within EXTRA_SCOPE.  */
int discriminator;
};
/* A (typedef,context,usage location) triplet.
It represents a typedef used through a
context at a given source location.
e.g.
struct foo {
typedef int myint;
};
struct bar {
foo::myint v; // #1<-- this location.
};
In bar, the triplet will be (myint, foo, #1).
*/
struct GTY(()) qualified_typedef_usage_s {
/* The typedef being used.  */
tree typedef_decl;
/* The context through which the typedef is used.  */
tree context;
/* Source location of the use.  */
location_t locus;
};
typedef struct qualified_typedef_usage_s qualified_typedef_usage_t;
/* Non-zero if this template specialization has access violations that
should be rechecked when the function is instantiated outside argument
deduction. */
#define TINFO_HAS_ACCESS_ERRORS(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE)))
#define FNDECL_HAS_ACCESS_ERRORS(NODE) \
(TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE)))
/* A TEMPLATE_INFO node.  Records typedef uses whose access must be
   rechecked on instantiation (see TINFO_HAS_ACCESS_ERRORS above).  */
struct GTY(()) tree_template_info {
struct tree_common common;
vec<qualified_typedef_usage_t, va_gc> *typedefs_needing_access_checking;
};
/* Discriminators for the lang_tree_node union below; each TS_CP_*
   value selects one union member (see the GTY tags there).  */
enum cp_tree_node_structure_enum {
TS_CP_GENERIC,
TS_CP_IDENTIFIER,
TS_CP_TPI,
TS_CP_PTRMEM,
TS_CP_BINDING,
TS_CP_OVERLOAD,
TS_CP_BASELINK,
TS_CP_WRAPPER,
TS_CP_DEFAULT_ARG,
TS_CP_DEFERRED_NOEXCEPT,
TS_CP_STATIC_ASSERT,
TS_CP_ARGUMENT_PACK_SELECT,
TS_CP_TRAIT_EXPR,
TS_CP_LAMBDA_EXPR,
TS_CP_TEMPLATE_INFO,
TS_CP_USERDEF_LITERAL,
LAST_TS_CP_ENUM
};
/* The resulting tree type. */
/* The C++ front end's tree node: a discriminated union of the generic
   tree node and the C++-specific structures above.  The desc string
   dispatches on cp_tree_node_structure; each GTY tag names the
   TS_CP_* discriminator for its member.  */
union GTY((desc ("cp_tree_node_structure (&%h)"),
chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node {
union tree_node GTY ((tag ("TS_CP_GENERIC"),
desc ("tree_node_structure (&%h)"))) generic;
struct template_parm_index_s GTY ((tag ("TS_CP_TPI"))) tpi;
struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem;
struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload;
struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink;
struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg;
struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept;
struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier;
struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT")))
static_assertion;
struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT")))
argument_pack_select;
struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR")))
trait_expression;
struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR")))
lambda_expression;
struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO")))
template_info;
struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL")))
userdef_literal;
};
/* Indices into the cp_global_trees array declared just below.  Access
   the nodes through the #define accessors that follow, not through
   raw indices.  */
enum cp_tree_index
{
CPTI_JAVA_BYTE_TYPE,
CPTI_JAVA_SHORT_TYPE,
CPTI_JAVA_INT_TYPE,
CPTI_JAVA_LONG_TYPE,
CPTI_JAVA_FLOAT_TYPE,
CPTI_JAVA_DOUBLE_TYPE,
CPTI_JAVA_CHAR_TYPE,
CPTI_JAVA_BOOLEAN_TYPE,
CPTI_WCHAR_DECL,
CPTI_VTABLE_ENTRY_TYPE,
CPTI_DELTA_TYPE,
CPTI_VTABLE_INDEX_TYPE,
CPTI_CLEANUP_TYPE,
CPTI_VTT_PARM_TYPE,
CPTI_CLASS_TYPE,
CPTI_UNKNOWN_TYPE,
CPTI_INIT_LIST_TYPE,
CPTI_VTBL_TYPE,
CPTI_VTBL_PTR_TYPE,
CPTI_STD,
CPTI_ABI,
CPTI_CONST_TYPE_INFO_TYPE,
CPTI_TYPE_INFO_PTR_TYPE,
CPTI_ABORT_FNDECL,
CPTI_GLOBAL_DELETE_FNDECL,
CPTI_AGGR_TAG,
CPTI_CTOR_IDENTIFIER,
CPTI_COMPLETE_CTOR_IDENTIFIER,
CPTI_BASE_CTOR_IDENTIFIER,
CPTI_DTOR_IDENTIFIER,
CPTI_COMPLETE_DTOR_IDENTIFIER,
CPTI_BASE_DTOR_IDENTIFIER,
CPTI_DELETING_DTOR_IDENTIFIER,
CPTI_DELTA_IDENTIFIER,
CPTI_IN_CHARGE_IDENTIFIER,
CPTI_VTT_PARM_IDENTIFIER,
CPTI_NELTS_IDENTIFIER,
CPTI_THIS_IDENTIFIER,
CPTI_PFN_IDENTIFIER,
CPTI_VPTR_IDENTIFIER,
CPTI_STD_IDENTIFIER,
CPTI_LANG_NAME_C,
CPTI_LANG_NAME_CPLUSPLUS,
CPTI_LANG_NAME_JAVA,
CPTI_EMPTY_EXCEPT_SPEC,
CPTI_NOEXCEPT_TRUE_SPEC,
CPTI_NOEXCEPT_FALSE_SPEC,
CPTI_JCLASS,
CPTI_TERMINATE,
CPTI_CALL_UNEXPECTED,
CPTI_ATEXIT_FN_PTR_TYPE,
CPTI_ATEXIT,
CPTI_DSO_HANDLE,
CPTI_DCAST,
CPTI_KEYED_CLASSES,
CPTI_NULLPTR,
CPTI_NULLPTR_TYPE,
/* Must stay last: the size of cp_global_trees.  */
CPTI_MAX
};
extern GTY(()) tree cp_global_trees[CPTI_MAX];
#define java_byte_type_node cp_global_trees[CPTI_JAVA_BYTE_TYPE]
#define java_short_type_node cp_global_trees[CPTI_JAVA_SHORT_TYPE]
#define java_int_type_node cp_global_trees[CPTI_JAVA_INT_TYPE]
#define java_long_type_node cp_global_trees[CPTI_JAVA_LONG_TYPE]
#define java_float_type_node cp_global_trees[CPTI_JAVA_FLOAT_TYPE]
#define java_double_type_node cp_global_trees[CPTI_JAVA_DOUBLE_TYPE]
#define java_char_type_node cp_global_trees[CPTI_JAVA_CHAR_TYPE]
#define java_boolean_type_node cp_global_trees[CPTI_JAVA_BOOLEAN_TYPE]
#define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL]
#define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE]
/* The type used to represent an offset by which to adjust the `this'
pointer in pointer-to-member types. */
#define delta_type_node cp_global_trees[CPTI_DELTA_TYPE]
/* The type used to represent an index into the vtable. */
#define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE]
#define class_type_node cp_global_trees[CPTI_CLASS_TYPE]
#define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE]
#define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE]
#define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE]
#define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE]
#define std_node cp_global_trees[CPTI_STD]
#define abi_node cp_global_trees[CPTI_ABI]
#define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE]
#define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE]
#define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL]
#define global_delete_fndecl cp_global_trees[CPTI_GLOBAL_DELETE_FNDECL]
#define current_aggr cp_global_trees[CPTI_AGGR_TAG]
#define nullptr_node cp_global_trees[CPTI_NULLPTR]
#define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE]
/* We cache these tree nodes so as to call get_identifier less
frequently. */
/* The name of a constructor that takes an in-charge parameter to
decide whether or not to construct virtual base classes. */
#define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER]
/* The name of a constructor that constructs virtual base classes. */
#define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER]
/* The name of a constructor that does not construct virtual base classes. */
#define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER]
/* The name of a destructor that takes an in-charge parameter to
decide whether or not to destroy virtual base classes and whether
or not to delete the object. */
#define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes. */
#define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER]
/* The name of a destructor that does not destroy virtual base
classes. */
#define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes, and
then deletes the entire object. */
#define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER]
#define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER]
#define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER]
/* The name of the parameter that contains a pointer to the VTT to use
for this subobject constructor or destructor. */
#define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER]
#define nelts_identifier cp_global_trees[CPTI_NELTS_IDENTIFIER]
#define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER]
#define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER]
#define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER]
/* The name of the std namespace. */
#define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER]
#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C]
#define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]
#define lang_name_java cp_global_trees[CPTI_LANG_NAME_JAVA]
/* Exception specifier used for throw(). */
#define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC]
#define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC]
#define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC]
/* If non-NULL, a POINTER_TYPE equivalent to (java::lang::Class*). */
#define jclass_node cp_global_trees[CPTI_JCLASS]
/* The declaration for `std::terminate'. */
#define terminate_node cp_global_trees[CPTI_TERMINATE]
/* The declaration for "__cxa_call_unexpected". */
#define call_unexpected_node cp_global_trees[CPTI_CALL_UNEXPECTED]
/* The type of the function-pointer argument to "__cxa_atexit" (or
"std::atexit", if "__cxa_atexit" is not being used). */
#define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE]
/* A pointer to `std::atexit'. */
#define atexit_node cp_global_trees[CPTI_ATEXIT]
/* A pointer to `__dso_handle'. */
#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE]
/* The declaration of the dynamic_cast runtime. */
#define dynamic_cast_node cp_global_trees[CPTI_DCAST]
/* The type of a destructor. */
#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE]
/* The type of the vtt parameter passed to subobject constructors and
destructors. */
#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE]
/* A TREE_LIST of the dynamic classes whose vtables may have to be
emitted in this translation unit. */
#define keyed_classes cp_global_trees[CPTI_KEYED_CLASSES]
/* Node to indicate default access. This must be distinct from the
access nodes in tree.h. */
#define access_default_node null_node
/* Global state.  A saved_scope records the parser/semantics state that
   must be saved and restored when entering and leaving a scope; the
   current state is reached through the scope_chain variable (declared
   below), and the accessor macros following this struct document the
   meaning of most fields.  */
struct GTY(()) saved_scope {
/* Name bindings that were in effect when this scope was pushed.  */
vec<cxx_saved_binding, va_gc> *old_bindings;
/* The current open namespace (see current_namespace below).  */
tree old_namespace;
/* The stack for namespaces of current declarations (decl_namespace_list).  */
vec<tree, va_gc> *decl_ns_list;
/* Name and type of the current class (current_class_name/_type).  */
tree class_name;
tree class_type;
/* The most recent access specifier (current_access_specifier).  */
tree access_specifier;
tree function_decl;
/* The language name stack and its top (current_lang_base/_name).  */
vec<tree, va_gc> *lang_base;
tree lang_name;
/* Active template parameters (current_template_parms).  */
tree template_parms;
/* Cached binding level of the most recently exited class
   (previous_class_level).  */
cp_binding_level *x_previous_class_level;
tree x_saved_tree;
/* Only used for uses of this in trailing return type. */
tree x_current_class_ptr;
tree x_current_class_ref;
int x_processing_template_decl;
int x_processing_specialization;
BOOL_BITFIELD x_processing_explicit_instantiation : 1;
BOOL_BITFIELD need_pop_function_context : 1;
int unevaluated_operand;
int inhibit_evaluation_warnings;
struct stmt_tree_s x_stmt_tree;
cp_binding_level *class_bindings;
cp_binding_level *bindings;
/* The previously-pushed scope, forming the scope chain.  */
struct saved_scope *prev;
};
/* The current open namespace. */
#define current_namespace scope_chain->old_namespace
/* The stack for namespaces of current declarations. */
#define decl_namespace_list scope_chain->decl_ns_list
/* IDENTIFIER_NODE: name of current class */
#define current_class_name scope_chain->class_name
/* _TYPE: the type of the current class */
#define current_class_type scope_chain->class_type
/* When parsing a class definition, the access specifier most recently
given by the user, or, if no access specifier was given, the
default value appropriate for the kind of class (i.e., struct,
class, or union). */
#define current_access_specifier scope_chain->access_specifier
/* Pointer to the top of the language name stack. */
#define current_lang_base scope_chain->lang_base
#define current_lang_name scope_chain->lang_name
/* When parsing a template declaration, a TREE_LIST represents the
active template parameters. Each node in the list represents one
level of template parameters. The innermost level is first in the
list. The depth of each level is stored as an INTEGER_CST in the
TREE_PURPOSE of each node. The parameters for that level are
stored in the TREE_VALUE. */
#define current_template_parms scope_chain->template_parms
#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation
/* The cached class binding level, from the most recently exited
class, or NULL if none. */
#define previous_class_level scope_chain->x_previous_class_level
/* A list of private types mentioned, for deferred access checking. */
/* NOTE(review): the comment above appears stale for this declaration;
   scope_chain is the head of the chain of saved scopes — confirm.  */
extern GTY(()) struct saved_scope *scope_chain;
/* A hash-table entry associating an integer id with a tree.  Used via
   htab_t with the hash/equality callbacks declared below (e.g. for the
   extern_decl_map field of struct language_function).  NOTE(review):
   `uid' is presumably a DECL_UID — confirm at the table's users.  */
struct GTY(()) cxx_int_tree_map {
unsigned int uid;
tree to;
};
/* Hash and equality callbacks for htab_t tables of cxx_int_tree_map.  */
extern unsigned int cxx_int_tree_map_hash (const void *);
extern int cxx_int_tree_map_eq (const void *, const void *);
/* Global state pertinent to the current function.  Reached through
   cp_function_chain (cfun->language); the accessor macros that follow
   this struct document most fields.  */
struct GTY(()) language_function {
struct c_language_function base;
/* Label marking the end of derived-class construction/destruction
   work (see cdtor_label below).  */
tree x_cdtor_label;
tree x_current_class_ptr;
tree x_current_class_ref;
/* EH_SPEC_BLOCK for this function's exception specifiers.  */
tree x_eh_spec_block;
/* The `__in_chrg' and `__vtt_parm' parameters; ctors/dtors only.  */
tree x_in_charge_parm;
tree x_vtt_parm;
/* Expression always returned, for the named return value optimization.  */
tree x_return_value;
/* A type involving 'auto' used for return type deduction.  */
tree x_auto_return_pattern;
/* Set when a return statement with a value / with no value / a call to
   a noreturn function is seen (see current_function_returns_* below).  */
BOOL_BITFIELD returns_value : 1;
BOOL_BITFIELD returns_null : 1;
BOOL_BITFIELD returns_abnormally : 1;
BOOL_BITFIELD x_in_function_try_handler : 1;
BOOL_BITFIELD x_in_base_initializer : 1;
/* True if this function can throw an exception. */
BOOL_BITFIELD can_throw : 1;
htab_t GTY((param_is(struct named_label_entry))) x_named_labels;
cp_binding_level *bindings;
vec<tree, va_gc> *x_local_names;
htab_t GTY((param_is (struct cxx_int_tree_map))) extern_decl_map;
};
/* The current C++-specific per-function global variables. */
#define cp_function_chain (cfun->language)
/* In a constructor destructor, the point at which all derived class
destroying/construction has been done. I.e., just before a
constructor returns, or before any base class destroying will be done
in a destructor. */
#define cdtor_label cp_function_chain->x_cdtor_label
/* When we're processing a member function, current_class_ptr is the
PARM_DECL for the `this' pointer. The current_class_ref is an
expression for `*this'.  Outside a function, the scope_chain copies
are used (e.g. for a trailing return type).  */
#define current_class_ptr \
(*(cfun && cp_function_chain \
? &cp_function_chain->x_current_class_ptr \
: &scope_chain->x_current_class_ptr))
#define current_class_ref \
(*(cfun && cp_function_chain \
? &cp_function_chain->x_current_class_ref \
: &scope_chain->x_current_class_ref))
/* The EH_SPEC_BLOCK for the exception-specifiers for the current
function, if any. */
#define current_eh_spec_block cp_function_chain->x_eh_spec_block
/* The `__in_chrg' parameter for the current function. Only used for
constructors and destructors. */
#define current_in_charge_parm cp_function_chain->x_in_charge_parm
/* The `__vtt_parm' parameter for the current function. Only used for
constructors and destructors. */
#define current_vtt_parm cp_function_chain->x_vtt_parm
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
#define current_function_returns_value cp_function_chain->returns_value
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
#define current_function_returns_null cp_function_chain->returns_null
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
#define current_function_returns_abnormally \
cp_function_chain->returns_abnormally
/* Nonzero if we are processing a base initializer. Zero elsewhere. */
#define in_base_initializer cp_function_chain->x_in_base_initializer
#define in_function_try_handler cp_function_chain->x_in_function_try_handler
/* Expression always returned from function, or error_mark_node
otherwise, for use by the automatic named return value optimization. */
#define current_function_return_value \
(cp_function_chain->x_return_value)
/* A type involving 'auto' to be used for return type deduction. */
#define current_function_auto_return_pattern \
(cp_function_chain->x_auto_return_pattern)
/* True if NAME is the IDENTIFIER_NODE for an overloaded "operator
new" or "operator delete". */
#define NEW_DELETE_OPNAME_P(NAME) \
((NAME) == ansi_opname (NEW_EXPR) \
|| (NAME) == ansi_opname (VEC_NEW_EXPR) \
|| (NAME) == ansi_opname (DELETE_EXPR) \
|| (NAME) == ansi_opname (VEC_DELETE_EXPR))
/* The IDENTIFIER_NODE naming the operator with tree code CODE, looked
   up in the operator_name_info table.  */
#define ansi_opname(CODE) \
(operator_name_info[(int) (CODE)].identifier)
/* Likewise, for the assignment form of the operator (e.g. `+=').  */
#define ansi_assopname(CODE) \
(assignment_operator_name_info[(int) (CODE)].identifier)
/* TRUE if a tree code represents a statement. */
extern bool statement_code_p[MAX_TREE_CODES];
#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]
/* The source languages a declaration may belong to, as selected by a
   linkage specification: C, C++, or Java.  */
enum languages
{
  lang_c,
  lang_cplusplus,
  lang_java
};
/* Macros to make error reporting functions' lives easier. */
#define TYPE_IDENTIFIER(NODE) (DECL_NAME (TYPE_NAME (NODE)))
#define TYPE_LINKAGE_IDENTIFIER(NODE) \
(TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))
/* Nonzero if NODE has no name for linkage purposes. */
#define TYPE_ANONYMOUS_P(NODE) \
(TAGGED_TYPE_P (NODE) && ANON_AGGRNAME_P (TYPE_LINKAGE_IDENTIFIER (NODE)))
/* The _DECL for this _TYPE. */
#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
/* Nonzero if T is a type that could resolve to any kind of concrete type
at instantiation time. */
#define WILDCARD_TYPE_P(T) \
(TREE_CODE (T) == TEMPLATE_TYPE_PARM \
|| TREE_CODE (T) == TYPENAME_TYPE \
|| TREE_CODE (T) == TYPEOF_TYPE \
|| TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
|| TREE_CODE (T) == DECLTYPE_TYPE)
/* Nonzero if T is a class (or struct or union) type. Also nonzero
for template type parameters, typename types, and instantiated
template template parameters. Keep these checks in ascending code
order. */
#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))
/* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or
union type. */
#define SET_CLASS_TYPE_P(T, VAL) \
(TYPE_LANG_FLAG_5 (T) = (VAL))
/* Nonzero if T is a class type. Zero for template type parameters,
typename types, and so forth. */
#define CLASS_TYPE_P(T) \
(RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))
/* Nonzero if T is a class type but not an union. */
#define NON_UNION_CLASS_TYPE_P(T) \
(CLASS_TYPE_P (T) && TREE_CODE (T) != UNION_TYPE)
/* Keep these checks in ascending code order. */
#define RECORD_OR_UNION_CODE_P(T) \
((T) == RECORD_TYPE || (T) == UNION_TYPE)
/* Nonzero if T is a class or enumeration type.  */
#define TAGGED_TYPE_P(T) \
(CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)
/* Currently a synonym for TAGGED_TYPE_P.  */
#define IS_OVERLOAD_TYPE(T) TAGGED_TYPE_P (T)
/* True if this a "Java" type, defined in 'extern "Java"'. */
#define TYPE_FOR_JAVA(NODE) TYPE_LANG_FLAG_3 (NODE)
/* True if this type is dependent. This predicate is only valid if
TYPE_DEPENDENT_P_VALID is true. */
#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)
/* True if dependent_type_p has been called for this type, with the
result that TYPE_DEPENDENT_P is valid. */
#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE)
/* Nonzero if this type is const-qualified. */
#define CP_TYPE_CONST_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)
/* Nonzero if this type is volatile-qualified. */
#define CP_TYPE_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)
/* Nonzero if this type is restrict-qualified. */
#define CP_TYPE_RESTRICT_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)
/* Nonzero if this type is const-qualified, but not
volatile-qualified. Other qualifiers are ignored. This macro is
used to test whether or not it is OK to bind an rvalue to a
reference. */
#define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \
== TYPE_QUAL_CONST)
/* The parameter-type list of NODE's function type, minus its first
   entry (the TREE_CHAIN of TYPE_ARG_TYPES).  */
#define FUNCTION_ARG_CHAIN(NODE) \
TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES
which refers to a user-written parameter. */
#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Similarly, but for DECL_ARGUMENTS. */
#define FUNCTION_FIRST_USER_PARM(NODE) \
skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))
/* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and
ambiguity issues. */
#define DERIVED_FROM_P(PARENT, TYPE) \
(lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_warning_or_error)\
!= NULL_TREE)
/* Gives the visibility specification for a class type. */
#define CLASSTYPE_VISIBILITY(TYPE) \
DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE))
#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \
DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE))
/* A (purpose, value) pair of trees, used where a full TREE_LIST node
   would be wasteful (e.g. the vcall_indices vector below).  */
typedef struct GTY (()) tree_pair_s {
tree purpose;
tree value;
} tree_pair_s;
typedef tree_pair_s *tree_pair_p;
/* This is a few header flags for 'struct lang_type'. Actually,
all but the first are used only for lang_type_class; they
are put in this structure to save space. */
struct GTY(()) lang_type_header {
/* Discriminator for the lang_type union: 1 for lang_type_class,
   0 for lang_type_ptrmem (see the GTY desc on struct lang_type).  */
BOOL_BITFIELD is_lang_type_class : 1;
/* The remaining flags are documented at their accessor macros below
   (TYPE_HAS_CONVERSION, TYPE_HAS_COPY_CTOR, TYPE_HAS_DEFAULT_CONSTRUCTOR,
   CLASSTYPE_*_FIELDS_NEED_INIT, TYPE_HAS_CONST_COPY_ASSIGN).  */
BOOL_BITFIELD has_type_conversion : 1;
BOOL_BITFIELD has_copy_ctor : 1;
BOOL_BITFIELD has_default_ctor : 1;
BOOL_BITFIELD const_needs_init : 1;
BOOL_BITFIELD ref_needs_init : 1;
BOOL_BITFIELD has_const_copy_assign : 1;
BOOL_BITFIELD spare : 1;
};
/* This structure provides additional information above and beyond
what is provided in the ordinary tree_type. In the past, we used it
for the types of class types, template parameters types, typename
types, and so forth. However, there can be many (tens to hundreds
of thousands) of template parameter types in a compilation, and
there's no need for this additional information in that case.
Therefore, we now use this data structure only for class types.
In the past, it was thought that there would be relatively few
class types. However, in the presence of heavy use of templates,
many (i.e., thousands) of classes can easily be generated.
Therefore, we should endeavor to keep the size of this structure to
a minimum.
Most of the single-bit flags below are documented at their accessor
macros (CLASSTYPE_* / TYPE_*) later in this file.  */
struct GTY(()) lang_type_class {
struct lang_type_header h;
unsigned char align;
unsigned has_mutable : 1;
unsigned com_interface : 1;
unsigned non_pod_class : 1;
unsigned nearly_empty_p : 1;
unsigned user_align : 1;
unsigned has_copy_assign : 1;
unsigned has_new : 1;
unsigned has_array_new : 1;
/* Two bits; see TYPE_GETS_DELETE/TYPE_GETS_REG_DELETE.  */
unsigned gets_delete : 2;
unsigned interface_only : 1;
unsigned interface_unknown : 1;
unsigned contains_empty_class_p : 1;
unsigned anon_aggr : 1;
unsigned non_zero_init : 1;
unsigned empty_p : 1;
unsigned vec_new_uses_cookie : 1;
unsigned declared_class : 1;
unsigned diamond_shaped : 1;
unsigned repeated_base : 1;
unsigned being_defined : 1;
unsigned java_interface : 1;
unsigned debug_requested : 1;
unsigned fields_readonly : 1;
unsigned use_template : 2;
unsigned ptrmemfunc_flag : 1;
unsigned was_anonymous : 1;
unsigned lazy_default_ctor : 1;
unsigned lazy_copy_ctor : 1;
unsigned lazy_copy_assign : 1;
unsigned lazy_destructor : 1;
unsigned has_const_copy_ctor : 1;
unsigned has_complex_copy_ctor : 1;
unsigned has_complex_copy_assign : 1;
unsigned non_aggregate : 1;
unsigned has_complex_dflt : 1;
unsigned has_list_ctor : 1;
unsigned non_std_layout : 1;
unsigned is_literal : 1;
unsigned lazy_move_ctor : 1;
unsigned lazy_move_assign : 1;
unsigned has_complex_move_ctor : 1;
unsigned has_complex_move_assign : 1;
unsigned has_constexpr_ctor : 1;
unsigned is_final : 1;
/* When adding a flag here, consider whether or not it ought to
apply to a template instance if it applies to the template. If
so, make sure to copy it in instantiate_class_template! */
/* There are some bits left to fill out a 32-bit word. Keep track
of this by updating the size of this bitfield whenever you add or
remove a flag. */
unsigned dummy : 2;
tree primary_base;
vec<tree_pair_s, va_gc> *vcall_indices;
tree vtables;
tree typeinfo_var;
vec<tree, va_gc> *vbases;
binding_table nested_udts;
tree as_base;
vec<tree, va_gc> *pure_virtuals;
tree friend_classes;
vec<tree, va_gc> * GTY((reorder ("resort_type_method_vec"))) methods;
tree key_method;
tree decl_list;
tree template_info;
tree befriending_classes;
/* In a RECORD_TYPE, information specific to Objective-C++, such
as a list of adopted protocols or a pointer to a corresponding
@interface. See objc/objc-act.h for details. */
tree objc_info;
/* sorted_fields is sorted based on a pointer, so we need to be able
to resort it if pointers get rearranged. */
struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields")))
sorted_fields;
/* FIXME reuse another field? */
tree lambda_expr;
};
/* The lang_type payload for pointer-to-member types: just the header
   plus the RECORD_TYPE representing the pointer-to-member function.  */
struct GTY(()) lang_type_ptrmem {
struct lang_type_header h;
tree record;
};
/* The C++-specific, language-dependent payload hung off a type
   (TYPE_LANG_SPECIFIC).  A discriminated union: h.is_lang_type_class
   selects between the class payload (tag "1") and the
   pointer-to-member payload (tag "0").  */
struct GTY((variable_size)) lang_type {
union lang_type_u
{
struct lang_type_header GTY((skip (""))) h;
struct lang_type_class GTY((tag ("1"))) c;
struct lang_type_ptrmem GTY((tag ("0"))) ptrmem;
} GTY((desc ("%h.h.is_lang_type_class"))) u;
};
/* Access the class-specific or pointer-to-member-specific payload of
   TYPE_LANG_SPECIFIC (NODE).  With tree checking enabled, these use a
   GNU statement expression to verify the union discriminator
   (h.is_lang_type_class) before yielding the address of the selected
   union member; a mismatch aborts via lang_check_failed.  Without
   checking, they select the member directly, as in the #else branch.
   BUG FIX: the checked variants read `<->u.c;' / `<->u.ptrmem;' — the
   `&lt' token had been destroyed by HTML-entity mangling, leaving
   invalid syntax.  Restored to `&lt->u.c;' / `&lt->u.ptrmem;' so the
   statement expression yields the member's address, matching the
   unchecked definitions below.  */
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define LANG_TYPE_CLASS_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
if (! lt->u.h.is_lang_type_class) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.c; })
#define LANG_TYPE_PTRMEM_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
if (lt->u.h.is_lang_type_class) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.ptrmem; })
#else
#define LANG_TYPE_CLASS_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.c)
#define LANG_TYPE_PTRMEM_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.ptrmem)
#endif /* ENABLE_TREE_CHECKING */
/* Nonzero for _CLASSTYPE means that operator delete is defined. */
#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)
/* Nonzero if `new NODE[x]' should cause the allocation of extra
storage to indicate how many array elements are in use. */
#define TYPE_VEC_NEW_USES_COOKIE(NODE) \
(CLASS_TYPE_P (NODE) \
&& LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)
/* Nonzero means that this _CLASSTYPE node defines ways of converting
itself to other types. */
#define TYPE_HAS_CONVERSION(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_type_conversion)
/* Nonzero means that NODE (a class type) has a default constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)
/* Nonzero means that NODE (a class type) has a copy constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)
/* Nonzero means that NODE (a class type) has a move constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign)
/* Nonzero means that NODE (a class type) has a destructor -- but that
it has not yet been declared. */
#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)
/* Nonzero means that NODE (a class type) is final */
#define CLASSTYPE_FINAL(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->is_final)
/* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */
#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign)
/* True iff the class type NODE has an "operator =" whose parameter
has a parameter of type "const X&". */
#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_const_copy_assign)
/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */
#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->h.has_copy_ctor)
#define TYPE_HAS_CONST_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor)
/* Nonzero if this class has an X(initializer_list<T>) constructor. */
#define TYPE_HAS_LIST_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor)
/* Nonzero if this class has a constexpr constructor other than a copy/move
constructor. Note that a class can have constexpr constructors for
static initialization even if it isn't a literal class. */
#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)
/* Nonzero if this class defines an overloaded operator new. (An
operator new [] doesn't count.) */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_new)
/* Nonzero if this class defines an overloaded operator new[]. */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)
/* Nonzero means that this type is being defined. I.e., the left brace
starting the definition of this type has been seen. */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)
/* Nonzero means that this type is either complete or being defined, so we
can do lookup in it. */
#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
(COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))
/* Mark bits for repeated base checks. */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))
/* Nonzero if the class NODE has multiple paths to the same (virtual)
base object. */
#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped)
/* Nonzero if the class NODE has multiple instances of the same base
type. */
#define CLASSTYPE_REPEATED_BASE_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->repeated_base)
/* The member function with which the vtable will be emitted:
the first noninline non-pure-virtual member function. NULL_TREE
if there is no key function or if this is a class template */
#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)
/* Vector member functions defined in this class. Each element is
either a FUNCTION_DECL, a TEMPLATE_DECL, or an OVERLOAD. All
functions with the same name end up in the same slot. The first
two elements are for constructors, and destructors, respectively.
All template conversion operators to innermost template dependent
types are overloaded on the next slot, if they exist. Note, the
names for these functions will not all be the same. The
non-template conversion operators & templated conversions to
non-innermost template types are next, followed by ordinary member
functions. There may be empty entries at the end of the vector.
The conversion operators are unsorted. The ordinary member
functions are sorted, once the class is complete. */
#define CLASSTYPE_METHOD_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->methods)
/* For class templates, this is a TREE_LIST of all member data,
functions, types, and friends in the order of declaration.
The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
and the RECORD_TYPE for the class template otherwise. */
#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)
/* The slot in the CLASSTYPE_METHOD_VEC where constructors go. */
#define CLASSTYPE_CONSTRUCTOR_SLOT 0
/* The slot in the CLASSTYPE_METHOD_VEC where destructors go. */
#define CLASSTYPE_DESTRUCTOR_SLOT 1
/* The first slot in the CLASSTYPE_METHOD_VEC where conversion
operators can appear. */
#define CLASSTYPE_FIRST_CONVERSION_SLOT 2
/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These
are the constructors that take an in-charge parameter. */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
((*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_CONSTRUCTOR_SLOT])
/* A FUNCTION_DECL for the destructor for NODE. These are the
destructors that take an in-charge parameter. If
CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
until the destructor is created with lazily_declare_fn. */
#define CLASSTYPE_DESTRUCTORS(NODE) \
(CLASSTYPE_METHOD_VEC (NODE) \
? (*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_DESTRUCTOR_SLOT] \
: NULL_TREE)
/* A dictionary of the nested user-defined-types (class-types, or enums)
found within this class. This table includes nested member class
templates. */
#define CLASSTYPE_NESTED_UTDS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nested_udts)
/* Nonzero if NODE has a primary base class, i.e., a base class with
which it shares the virtual function table pointer. */
#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
(CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)
/* If non-NULL, this is the binfo for the primary base class, i.e.,
the base class which contains the virtual function table pointer
for this class. */
#define CLASSTYPE_PRIMARY_BINFO(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->primary_base)
/* A vector of BINFOs for the direct and indirect virtual base classes
that this type uses in a post-order depth-first left-to-right
order. (In other words, these bases appear in the order that they
should be initialized.) */
#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)
/* The type corresponding to NODE when NODE is used as a base class,
i.e., NODE without virtual base classes. */
#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)
/* True iff NODE is the CLASSTYPE_AS_BASE version of some type. */
#define IS_FAKE_BASE_TYPE(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \
&& CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))
/* These are the size and alignment of the type without its virtual
base classes, for when we use this type as a base itself. */
#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))
/* The alignment of NODE, without its virtual bases, in bytes. */
#define CLASSTYPE_ALIGN_UNIT(NODE) \
(CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)
/* True if this a Java interface type, declared with
'__attribute__ ((java_interface))'. */
#define TYPE_JAVA_INTERFACE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->java_interface)
/* A vec<tree> of virtual functions which cannot be inherited by
derived classes. When deriving from this type, the derived
class must provide its own definition for each of these functions. */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)
/* Nonzero means that this type is an abstract class type. */
#define ABSTRACT_CLASS_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS(NODE))
/* Nonzero means that this type has an X() constructor. */
#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_default_ctor)
/* Nonzero means that this type contains a mutable member. */
#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))
/* Nonzero means that this class type is not POD for the purpose of layout
(as defined in the ABI). This is different from the language's POD. */
#define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)
/* Nonzero means that this class type is a non-standard-layout class. */
#define CLASSTYPE_NON_STD_LAYOUT(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout)
/* Nonzero means that this class contains pod types whose default
initialization is not a zero initialization (namely, pointers to
data members). */
#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init)
/* Nonzero if this class is "empty" in the sense of the C++ ABI. */
#define CLASSTYPE_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->empty_p)
/* Nonzero if this class is "nearly empty", i.e., contains only a
virtual function table pointer. */
#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p)
/* Nonzero if this class contains an empty subobject. */
#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p)
/* A list of class types of which this type is a friend. The
TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the
case of a template friend. */
#define CLASSTYPE_FRIEND_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->friend_classes)
/* A list of the classes which grant friendship to this class. */
#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes)
/* The associated LAMBDA_EXPR that made this class. */
#define CLASSTYPE_LAMBDA_EXPR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr)
/* The extra mangling scope for this closure type. */
#define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \
(LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE)))
/* Say whether this node was declared as a "class" or a "struct". */
#define CLASSTYPE_DECLARED_CLASS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->declared_class)
/* Nonzero if this class has const members
which have no specified initialization. */
#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init : 0)
#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init = (VALUE))
/* Nonzero if this class has ref members
which have no specified initialization. */
#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init : 0)
#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init = (VALUE))
/* Nonzero if this class is included from a header file which employs
`#pragma interface', and it is not included in its implementation file. */
#define CLASSTYPE_INTERFACE_ONLY(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_only)
/* True if we have already determined whether or not vtables, VTTs,
typeinfo, and other similar per-class data should be emitted in
this translation unit. This flag does not indicate whether or not
these items should be emitted; it only indicates that we know one
way or the other. */
#define CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0)
/* The opposite of CLASSTYPE_INTERFACE_KNOWN. */
#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown)
#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1)
#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0)
/* Nonzero if a _DECL node requires us to output debug info for this class. */
#define CLASSTYPE_DEBUG_REQUESTED(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->debug_requested)
/* Additional macros for inheritance information. */
/* Nonzero means that this class is on a path leading to a new vtable. */
#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE)
/* Nonzero means B (a BINFO) has its own vtable. Any copies will not
have this flag set. */
#define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B))
/* Compare a BINFO_TYPE with another type for equality. For a binfo,
this is functionally equivalent to using same_type_p, but
measurably faster. At least one of the arguments must be a
BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If
BINFO_TYPE(T) ever stops being the main variant of the class the
binfo is for, this macro must change. */
#define SAME_BINFO_TYPE_P(A, B) ((A) == (B))
/* Any subobject that needs a new vtable must have a vptr and must not
be a non-virtual primary base (since it would then use the vtable from a
derived class and never become non-primary.)  Asserted at mark time.  */
#define SET_BINFO_NEW_VTABLE_MARKED(B) \
(BINFO_NEW_VTABLE_MARKED (B) = 1, \
gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \
gcc_assert (TYPE_VFIELD (BINFO_TYPE (B))))
/* Nonzero if this binfo is for a dependent base - one that should not
be searched. */
#define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE)
/* Nonzero if this binfo has lost its primary base binfo (because that
is a nearly-empty virtual base that has been taken by some other
base in the complete hierarchy). */
#define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE)
/* Nonzero if this BINFO is a primary base class. */
#define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE)
/* Used by various search routines. */
#define IDENTIFIER_MARKED(NODE) TREE_LANG_FLAG_0 (NODE)
/* A vec<tree_pair_s> of the vcall indices associated with the class
NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual
function. The VALUE is the index into the virtual table where the
vcall offset for that function is stored, when NODE is a virtual
base. */
#define CLASSTYPE_VCALL_INDICES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices)
/* The various vtables for the class NODE. The primary vtable will be
first, followed by the construction vtables and VTT, if any. */
#define CLASSTYPE_VTABLES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vtables)
/* The std::type_info variable representing this class, or NULL if no
such variable has been created. This field is only set for the
TYPE_MAIN_VARIANT of the class. */
#define CLASSTYPE_TYPEINFO_VAR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)
/* Accessor macros for the BINFO_VIRTUALS list; each entry is a
TREE_LIST node describing one virtual-function slot. */
/* The number of bytes by which to adjust the `this' pointer when
calling this virtual function. Subtract this value from the this
pointer. Always non-NULL, might be constant zero though. */
#define BV_DELTA(NODE) (TREE_PURPOSE (NODE))
/* If non-NULL, the vtable index at which to find the vcall offset
when calling this virtual function. Add the value at that vtable
index to the this pointer. */
#define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE))
/* The function to call. */
#define BV_FN(NODE) (TREE_VALUE (NODE))
/* Whether or not this entry is for a lost primary virtual base. */
#define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE
will be NULL_TREE to indicate a throw specification of `()', or
no exceptions allowed. For a noexcept specification, TREE_VALUE
is NULL_TREE and TREE_PURPOSE is the constant-expression. For
a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT
(for templates) or an OVERLOAD list of functions (for implicitly
declared functions). */
#define TYPE_RAISES_EXCEPTIONS(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()'
or noexcept(true). */
#define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. This is the
case for things declared noexcept(true) and, with -fnothrow-opt, for
throw() functions. */
#define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE)
/* The binding level associated with the namespace (a NAMESPACE_DECL). */
#define NAMESPACE_LEVEL(NODE) \
(LANG_DECL_NS_CHECK (NODE)->level)
/* Flags shared by all forms of DECL_LANG_SPECIFIC.
Some of the flags live here only to make lang_decl_min/fn smaller. Do
not make this struct larger than 32 bits; instead, make sel smaller. */
struct GTY(()) lang_decl_base {
unsigned selector : 16; /* Discriminates the lang_decl_u arm in use;
larger than necessary for faster access. */
ENUM_BITFIELD(languages) language : 4;
unsigned use_template : 2;
unsigned not_really_extern : 1; /* var or fn */
unsigned initialized_in_class : 1; /* var or fn */
unsigned repo_available_p : 1; /* var or fn */
unsigned threadprivate_or_deleted_p : 1; /* var or fn */
unsigned anticipated_p : 1; /* fn, type or template */
unsigned friend_attr : 1; /* fn, type or template */
unsigned template_conv_p : 1; /* var or template */
unsigned odr_used : 1; /* var or fn */
unsigned u2sel : 1; /* Discriminates lang_decl_min's u2 union. */
/* 1 spare bit */
};
/* True for DECL codes which have template info and access; such
decls use the lang_decl_min form of DECL_LANG_SPECIFIC. */
#define LANG_DECL_HAS_MIN(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
|| TREE_CODE (NODE) == FIELD_DECL \
|| TREE_CODE (NODE) == VAR_DECL \
|| TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == USING_DECL)
/* DECL_LANG_SPECIFIC for the above codes (see LANG_DECL_HAS_MIN). */
struct GTY(()) lang_decl_min {
struct lang_decl_base base;
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_ALIAS.
In a FUNCTION_DECL for which DECL_THUNK_P does not hold,
VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
DECL_TEMPLATE_INFO. */
tree template_info;
union lang_decl_u2 {
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_VIRTUAL_OFFSET.
Otherwise this is DECL_ACCESS. */
tree GTY ((tag ("0"))) access;
/* For VAR_DECL in function, this is DECL_DISCRIMINATOR. */
int GTY ((tag ("1"))) discriminator;
} GTY ((desc ("%0.u.base.u2sel"))) u2; /* Arm selected by base.u2sel. */
};
/* Additional DECL_LANG_SPECIFIC information for functions; selected
when lang_decl_base.selector == 1 (see LANG_DECL_FN_CHECK). */
struct GTY(()) lang_decl_fn {
struct lang_decl_min min;
/* In an overloaded operator, this is the value of
DECL_OVERLOADED_OPERATOR_P. */
ENUM_BITFIELD (tree_code) operator_code : 16;
unsigned global_ctor_p : 1;
unsigned global_dtor_p : 1;
unsigned constructor_attr : 1;
unsigned destructor_attr : 1;
unsigned assignment_operator_p : 1;
unsigned static_function : 1;
unsigned pure_virtual : 1;
unsigned defaulted_p : 1;
unsigned has_in_charge_parm_p : 1;
unsigned has_vtt_parm_p : 1;
unsigned pending_inline_p : 1;
unsigned nonconverting : 1;
unsigned thunk_p : 1;
unsigned this_thunk_p : 1;
unsigned hidden_friend_p : 1;
/* 1 spare bit. */
/* For a non-thunk function decl, this is a tree list of
friendly classes. For a thunk function decl, it is the
thunked to function decl. */
tree befriending_classes;
/* For a non-virtual FUNCTION_DECL, this is
DECL_FRIEND_CONTEXT. For a virtual FUNCTION_DECL for which
DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both
this pointer and result pointer adjusting thunks are
chained here. This pointer thunks to return pointer thunks
will be chained on the return pointer thunk. */
tree context;
union lang_decl_u5
{
/* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is
DECL_CLONED_FUNCTION. */
tree GTY ((tag ("0"))) cloned_function;
/* In a FUNCTION_DECL for which THUNK_P holds this is the
THUNK_FIXED_OFFSET. */
HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
} GTY ((desc ("%1.thunk_p"))) u5;
union lang_decl_u3
{
struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info;
struct language_function * GTY ((tag ("0")))
saved_language_function;
} GTY ((desc ("%1.pending_inline_p"))) u; /* Arm selected by pending_inline_p. */
};
/* DECL_LANG_SPECIFIC for namespaces; selected when
lang_decl_base.selector == 2 (see LANG_DECL_NS_CHECK). */
struct GTY(()) lang_decl_ns {
struct lang_decl_base base;
cp_binding_level *level; /* Accessed via NAMESPACE_LEVEL. */
};
/* DECL_LANG_SPECIFIC for parameters; selected when
lang_decl_base.selector == 3 (see LANG_DECL_PARM_CHECK). */
struct GTY(()) lang_decl_parm {
struct lang_decl_base base;
int level; /* Accessed via DECL_PARM_LEVEL. */
int index; /* Accessed via DECL_PARM_INDEX. */
};
/* DECL_LANG_SPECIFIC for all types. It would be nice to just make this a
union rather than a struct containing a union as its only field, but
tree.h declares it as a struct. */
struct GTY((variable_size)) lang_decl {
union GTY((desc ("%h.base.selector"))) lang_decl_u {
struct lang_decl_base GTY ((default)) base;
struct lang_decl_min GTY((tag ("0"))) min;
struct lang_decl_fn GTY ((tag ("1"))) fn;
struct lang_decl_ns GTY((tag ("2"))) ns;
struct lang_decl_parm GTY((tag ("3"))) parm;
} u; /* Arm selected by base.selector. */
};
/* Looks through a template (if present) to find what it declares. */
#define STRIP_TEMPLATE(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
/* Checked accessors for the arms of the DECL_LANG_SPECIFIC union.
Each statement-expression macro verifies that NODE carries the
expected lang_decl form (via LANG_DECL_HAS_MIN or the
base.selector tag) before yielding the address of the matching
union member; on a mismatch it aborts via lang_check_failed. */
#define LANG_DECL_MIN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (!LANG_DECL_HAS_MIN (NODE)) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.min; })
/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function
template, not just on a FUNCTION_DECL. So when looking for things in
lang_decl_fn, look down through a TEMPLATE_DECL into its result. */
#define LANG_DECL_FN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE)); \
if (!DECL_DECLARES_FUNCTION_P (NODE) || lt->u.base.selector != 1) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.fn; })
#define LANG_DECL_NS_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (TREE_CODE (NODE) != NAMESPACE_DECL || lt->u.base.selector != 2) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.ns; })
#define LANG_DECL_PARM_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (TREE_CODE (NODE) != PARM_DECL) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.parm; })
#define LANG_DECL_U2_CHECK(NODE, TF) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (!LANG_DECL_HAS_MIN (NODE) || lt->u.base.u2sel != TF) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.min.u2; })
#else
/* Unchecked variants: plain pointers into the union, no validation. */
#define LANG_DECL_MIN_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.min)
#define LANG_DECL_FN_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn)
#define LANG_DECL_NS_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.ns)
#define LANG_DECL_PARM_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.parm)
#define LANG_DECL_U2_CHECK(NODE, TF) \
(&DECL_LANG_SPECIFIC (NODE)->u.min.u2)
#endif /* ENABLE_TREE_CHECKING */
/* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the
declaration. Some entities (like a member function in a local
class, or a local variable) do not have linkage at all, and this
macro should not be used in those cases.
Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was
created by language-independent code, and has C linkage. Most
VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but
we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */
#define DECL_LANGUAGE(NODE) \
(DECL_LANG_SPECIFIC (NODE) \
? DECL_LANG_SPECIFIC (NODE)->u.base.language \
: (TREE_CODE (NODE) == FUNCTION_DECL \
? lang_c : lang_cplusplus))
/* Set the language linkage for NODE to LANGUAGE. */
#define SET_DECL_LANGUAGE(NODE, LANGUAGE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE))
/* For FUNCTION_DECLs: nonzero means that this function is a constructor. */
#define DECL_CONSTRUCTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->constructor_attr)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
object. */
#define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == complete_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
object. */
#define DECL_BASE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == base_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
specialized in-charge constructor or the specialized not-in-charge
constructor. */
#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \
&& !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */
#define DECL_COPY_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)
/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */
#define DECL_MOVE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE))
/* Nonzero if NODE is a destructor. */
#define DECL_DESTRUCTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->destructor_attr)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
specialized in-charge destructor, in-charge deleting destructor,
or the base destructor. */
#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_DESTRUCTOR_P (NODE) \
&& !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object. */
#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == complete_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
object. */
#define DECL_BASE_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == base_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object that deletes the object after it has been destroyed. */
#define DECL_DELETING_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == deleting_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or
destructor. */
#define DECL_CLONED_FUNCTION_P(NODE) (!!decl_cloned_function_p (NODE, true))
/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
cloned. */
#define DECL_CLONED_FUNCTION(NODE) (*decl_cloned_function_p (NODE, false))
/* Perform an action for each clone of FN, if FN is a function with
clones. This macro should be used like:
FOR_EACH_CLONE (clone, fn)
{ ... }
Clones follow FN on DECL_CHAIN. */
#define FOR_EACH_CLONE(CLONE, FN) \
if (TREE_CODE (FN) == FUNCTION_DECL \
&& (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN) \
|| DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))) \
for (CLONE = DECL_CHAIN (FN); \
CLONE && DECL_CLONED_FUNCTION_P (CLONE); \
CLONE = DECL_CHAIN (CLONE))
/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */
#define DECL_DISCRIMINATOR_P(NODE) \
(TREE_CODE (NODE) == VAR_DECL \
&& DECL_FUNCTION_SCOPE_P (NODE))
/* Discriminator for name mangling. */
#define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator)
/* True iff DECL_DISCRIMINATOR is set for a DECL_DISCRIMINATOR_P decl. */
#define DECL_DISCRIMINATOR_SET_P(NODE) \
(DECL_LANG_SPECIFIC (NODE) && DECL_LANG_SPECIFIC (NODE)->u.base.u2sel == 1)
/* The index of a user-declared parameter in its function, starting at 1.
All artificial parameters will have index 0. */
#define DECL_PARM_INDEX(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->index)
/* The level of a user-declared parameter in its function, starting at 1.
A parameter of the function will have level 1; a parameter of the first
nested function declarator (i.e. t in void f (void (*p)(T t))) will have
level 2. */
#define DECL_PARM_LEVEL(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->level)
/* Nonzero if the VTT parm has been added to NODE. */
#define DECL_HAS_VTT_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p)
/* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is
required. */
#define DECL_NEEDS_VTT_PARM_P(NODE) \
(CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \
&& (DECL_BASE_CONSTRUCTOR_P (NODE) \
|| DECL_BASE_DESTRUCTOR_P (NODE)))
/* Nonzero if NODE is a user-defined conversion operator. */
#define DECL_CONV_FN_P(NODE) \
(DECL_NAME (NODE) && IDENTIFIER_TYPENAME_P (DECL_NAME (NODE)))
/* If FN is a conversion operator, the type to which it converts.
Otherwise, NULL_TREE. */
#define DECL_CONV_FN_TYPE(FN) \
(DECL_CONV_FN_P (FN) ? TREE_TYPE (DECL_NAME (FN)) : NULL_TREE)
/* Nonzero if NODE, which is a TEMPLATE_DECL, is a template
conversion operator to a type dependent on the innermost template
args. */
#define DECL_TEMPLATE_CONV_FN_P(NODE) \
(DECL_LANG_SPECIFIC (TEMPLATE_DECL_CHECK (NODE))->u.base.template_conv_p)
/* Nonzero if NODE, a static data member, was declared in its class as an
array of unknown bound. Reuses the template_conv_p bit, which is
otherwise unused for a VAR_DECL. */
#define VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.base.template_conv_p \
: false)
#define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.template_conv_p = true)
/* Set the overloaded operator code for NODE to CODE. */
#define SET_OVERLOADED_OPERATOR_CODE(NODE, CODE) \
(LANG_DECL_FN_CHECK (NODE)->operator_code = (CODE))
/* If NODE is an overloaded operator, then this returns the TREE_CODE
associated with the overloaded operator.
DECL_ASSIGNMENT_OPERATOR_P must also be checked to determine
whether or not NODE is an assignment operator. If NODE is not an
overloaded operator, ERROR_MARK is returned. Since the numerical
value of ERROR_MARK is zero, this macro can be used as a predicate
to test whether or not NODE is an overloaded operator. */
#define DECL_OVERLOADED_OPERATOR_P(NODE) \
(IDENTIFIER_OPNAME_P (DECL_NAME (NODE)) \
? LANG_DECL_FN_CHECK (NODE)->operator_code : ERROR_MARK)
/* Nonzero if NODE is an assignment operator (including += and such). */
#define DECL_ASSIGNMENT_OPERATOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->assignment_operator_p)
/* For FUNCTION_DECLs: nonzero means that this function is a
constructor or a destructor with an extra in-charge parameter to
control whether or not virtual bases are constructed. */
#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p)
/* Nonzero if DECL is a declaration of __builtin_constant_p. */
#define DECL_IS_BUILTIN_CONSTANT_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \
&& DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P)
/* Nonzero for _DECL means that this decl appears in (or will appear
in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for
detecting circularity in case members are multiply defined. In the
case of a VAR_DECL, it is also used to determine how program storage
should be allocated. */
#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE))
/* Nonzero for a VAR_DECL means that the variable's initialization (if
any) has been processed. (In general, DECL_INITIALIZED_P is
!DECL_EXTERNAL, but static data members may be initialized even if
not defined.) */
#define DECL_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL iff an explicit initializer was provided
or a non-trivial constructor is called. */
#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL that was initialized with a
constant-expression. */
#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \
(TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE)))
/* Nonzero if the DECL was initialized in the class definition itself,
rather than outside the class. This is used for both static member
VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */
#define DECL_INITIALIZED_IN_CLASS_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.initialized_in_class)
/* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr].
Only available for decls with DECL_LANG_SPECIFIC. */
#define DECL_ODR_USED(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.odr_used)
/* Nonzero for DECL means that this decl is just a friend declaration,
and should not be added to the list of members for this class. */
#define DECL_FRIEND_P(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.friend_attr)
/* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */
#define DECL_BEFRIENDING_CLASSES(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* Nonzero for FUNCTION_DECL means that this decl is a static
member function. */
#define DECL_STATIC_FUNCTION_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->static_function)
/* Nonzero for FUNCTION_DECL means that this decl is a non-static
member function. */
#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \
(TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)
/* Nonzero for FUNCTION_DECL means that this decl is a member function
(static or non-static). */
#define DECL_FUNCTION_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as const X *const. */
#define DECL_CONST_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as volatile X *const. */
#define DECL_VOLATILE_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for a DECL means that this member is a non-static member. */
#define DECL_NONSTATIC_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL)
/* Nonzero for _DECL means that this member object type
is mutable. */
#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE))
/* Nonzero for _DECL means that this constructor or conversion function is
non-converting. */
#define DECL_NONCONVERTING_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->nonconverting)
/* Nonzero for FUNCTION_DECL means that this member function is a pure
virtual function. */
#define DECL_PURE_VIRTUAL_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pure_virtual)
/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an
invalid overrider for a function from a base class. Once we have
complained about an invalid overrider we avoid complaining about it
again. */
#define DECL_INVALID_OVERRIDER_P(NODE) \
(DECL_LANG_FLAG_4 (NODE))
/* True (in a FUNCTION_DECL) if NODE is a function declared with
an override virt-specifier. */
#define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE))
/* True (in a FUNCTION_DECL) if NODE is a function declared with
a final virt-specifier. */
#define DECL_FINAL_P(NODE) (TREE_LANG_FLAG_1 (NODE))
/* The thunks associated with NODE, a FUNCTION_DECL. */
#define DECL_THUNKS(NODE) \
(DECL_VIRTUAL_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* Set DECL_THUNKS. */
#define SET_DECL_THUNKS(NODE,THUNKS) \
(LANG_DECL_FN_CHECK (NODE)->context = (THUNKS))
/* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this
is the base it inherits from. */
#define DECL_INHERITED_CTOR_BASE(NODE) \
(DECL_CONSTRUCTOR_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* Set the inherited base. */
#define SET_DECL_INHERITED_CTOR_BASE(NODE,INH) \
(LANG_DECL_FN_CHECK (NODE)->context = (INH))
/* Nonzero if NODE is a thunk, rather than an ordinary function. */
#define DECL_THUNK_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_LANG_SPECIFIC (NODE) \
&& LANG_DECL_FN_CHECK (NODE)->thunk_p)
/* Set DECL_THUNK_P for node. */
#define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \
(LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \
LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING))
/* Nonzero if NODE is a this pointer adjusting thunk. */
#define DECL_THIS_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a result pointer adjusting thunk. */
#define DECL_RESULT_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */
#define DECL_NON_THUNK_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE))
/* Nonzero if NODE is `extern "C"'. */
#define DECL_EXTERN_C_P(NODE) \
(DECL_LANGUAGE (NODE) == lang_c)
/* Nonzero if NODE is an `extern "C"' function. */
#define DECL_EXTERN_C_FUNCTION_P(NODE) \
(DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE))
/* True iff DECL is an entity with vague linkage whose definition is
available in this translation unit. */
#define DECL_REPO_AVAILABLE_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.repo_available_p)
/* True if DECL is declared 'constexpr'. */
#define DECL_DECLARED_CONSTEXPR_P(DECL) \
DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL)))
/* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a
template function. */
#define DECL_PRETTY_FUNCTION_P(NODE) \
(DECL_NAME (NODE) \
&& !strcmp (IDENTIFIER_POINTER (DECL_NAME (NODE)), "__PRETTY_FUNCTION__"))
/* Nonzero if the thread-local variable was declared with __thread
as opposed to thread_local. */
#define DECL_GNU_TLS_P(NODE) \
(TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)))
/* The _TYPE context in which this _DECL appears. This field holds the
class where a virtual function instance is actually defined. */
#define DECL_CLASS_CONTEXT(NODE) \
(DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE)
/* For a non-member friend function, the class (if any) in which this
friend was defined. For example, given:
struct S { friend void f (); };
the DECL_FRIEND_CONTEXT for `f' will be `S'. */
#define DECL_FRIEND_CONTEXT(NODE) \
((DECL_DECLARES_FUNCTION_P (NODE) \
&& DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \
? LANG_DECL_FN_CHECK (NODE)->context \
: NULL_TREE)
/* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */
#define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \
(LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT))
/* The enclosing scope of a decl or type, mapping file scope to the
global namespace. */
#define CP_DECL_CONTEXT(NODE) \
(!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace)
#define CP_TYPE_CONTEXT(NODE) \
(!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace)
#define FROB_CONTEXT(NODE) \
((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE))
/* 1 iff NODE has namespace scope, including the global namespace. */
#define DECL_NAMESPACE_SCOPE_P(NODE) \
(!DECL_TEMPLATE_PARM_P (NODE) \
&& TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL)
#define TYPE_NAMESPACE_SCOPE_P(NODE) \
(TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL)
#define NAMESPACE_SCOPE_P(NODE) \
((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \
|| (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE)))
/* 1 iff NODE is a class member. */
#define DECL_CLASS_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE)))
#define TYPE_CLASS_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE)))
/* 1 iff NODE is function-local. */
#define DECL_FUNCTION_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) \
&& TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL)
#define TYPE_FUNCTION_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL)
/* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for
both the primary typeinfo object and the associated NTBS name. */
#define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))
/* 1 iff VAR_DECL node NODE is virtual table or VTT. */
#define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */
#define FUNCTION_REF_QUALIFIED(NODE) \
TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. */
#define FUNCTION_RVALUE_QUALIFIED(NODE) \
TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE))
/* Returns 1 iff VAR_DECL is a construction virtual table.
DECL_VTABLE_OR_VTT_P will be true in this case and must be checked
before using this macro. */
#define DECL_CONSTRUCTION_VTABLE_P(NODE) \
TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE))
/* 1 iff NODE is function-local, but for types. */
#define LOCAL_CLASS_P(NODE) \
(decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE)
/* For a NAMESPACE_DECL: the list of using namespace directives.
The PURPOSE is the used namespace, the value is the namespace
that is the common ancestor. */
#define DECL_NAMESPACE_USING(NODE) DECL_VINDEX (NAMESPACE_DECL_CHECK (NODE))
/* In a NAMESPACE_DECL, the DECL_INITIAL is used to record all users
of a namespace, to record the transitive closure of using namespace. */
#define DECL_NAMESPACE_USERS(NODE) DECL_INITIAL (NAMESPACE_DECL_CHECK (NODE))
/* In a NAMESPACE_DECL, the list of namespaces which have associated
themselves with this one. */
#define DECL_NAMESPACE_ASSOCIATIONS(NODE) \
(NAMESPACE_DECL_CHECK (NODE)->decl_non_common.saved_tree)
/* In a NAMESPACE_DECL, points to the original namespace if this is
a namespace alias. */
#define DECL_NAMESPACE_ALIAS(NODE) \
DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE))
#define ORIGINAL_NAMESPACE(NODE) \
(DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE))
/* Nonzero if NODE is the std namespace. */
#define DECL_NAMESPACE_STD_P(NODE) \
(TREE_CODE (NODE) == NAMESPACE_DECL \
&& CP_DECL_CONTEXT (NODE) == global_namespace \
&& DECL_NAME (NODE) == std_identifier)
/* In a TREE_LIST concatenating using directives, indicate indirect
directives. */
#define TREE_INDIRECT_USING(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* In a TREE_LIST in an attribute list, indicates that the attribute
must be applied at instantiation time. */
#define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* Lookup and insertion for the shadowed-variable table used by
DECL_SHADOWED_FOR_VAR / SET_DECL_SHADOWED_FOR_VAR below. */
extern tree decl_shadowed_for_var_lookup (tree);
extern void decl_shadowed_for_var_insert (tree, tree);
/* Nonzero if this is a using decl for a dependent scope. */
#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))
/* The scope named in a using decl. */
#define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE))
/* The decls named by a using decl. */
#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))
/* Nonzero if the using decl refers to a dependent type. */
#define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE))
/* In a VAR_DECL, true if we have a shadowed local variable
in the shadowed var table for this VAR_DECL. */
#define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \
(VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p)
/* In a VAR_DECL for a variable declared in a for statement,
this is the shadowed (local) variable. */
#define DECL_SHADOWED_FOR_VAR(NODE) \
(DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL)
#define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \
(decl_shadowed_for_var_insert (NODE, VAL))
/* In a FUNCTION_DECL, this is nonzero if this function was defined in
the class definition. We have saved away the text of the function,
but have not yet processed it. */
#define DECL_PENDING_INLINE_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pending_inline_p)
/* If DECL_PENDING_INLINE_P holds, this is the saved text of the
function. */
#define DECL_PENDING_INLINE_INFO(NODE) \
(LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info)
/* Nonzero for TYPE_DECL means that it was written 'using name = type'. */
#define TYPE_DECL_ALIAS_P(NODE) \
DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE))
/* Nonzero for a type which is an alias for another type; i.e, a type
which declaration was written 'using name-of-type =
another-type'. */
#define TYPE_ALIAS_P(NODE) \
(TYPE_P (NODE) \
&& TYPE_NAME (NODE) \
&& TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \
&& TYPE_DECL_ALIAS_P (TYPE_NAME (NODE)))
/* For a class type: if this structure has many fields, we'll sort them
and put them into a TREE_VEC. */
#define CLASSTYPE_SORTED_FIELDS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->sorted_fields)
/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or
TEMPLATE_DECL, the entity is either a template specialization (if
DECL_USE_TEMPLATE is nonzero) or the abstract instance of the
template itself.
In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose
TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a
specialization or abstract instance. The TREE_VALUE is the
template arguments used to specialize the template.
Consider:
template <typename T> struct S { friend void f(T) {} };
In this case, S<int>::f is, from the point of view of the compiler,
an instantiation of a template -- but, from the point of view of
the language, each instantiation of S results in a wholly unrelated
global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f
will be non-NULL, but DECL_USE_TEMPLATE will be zero. */
#define DECL_TEMPLATE_INFO(NODE) \
(DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK (NODE)) \
->u.min.template_info)
/* For a VAR_DECL, indicates that the variable is actually a
non-static data member of anonymous union that has been promoted to
variable status. */
#define DECL_ANON_UNION_VAR_P(NODE) \
(DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)))
/* Template information for a RECORD_TYPE or UNION_TYPE. */
#define CLASSTYPE_TEMPLATE_INFO(NODE) \
(LANG_TYPE_CLASS_CHECK (RECORD_OR_UNION_CHECK (NODE))->template_info)
/* Template information for an ENUMERAL_TYPE. Although an enumeration may
not be a primary template, it may be declared within the scope of a
primary template and the enumeration constants may depend on
non-type template parameters. */
#define ENUM_TEMPLATE_INFO(NODE) \
(TYPE_LANG_SLOT_1 (ENUMERAL_TYPE_CHECK (NODE)))
/* Template information for a template template parameter. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \
(LANG_TYPE_CLASS_CHECK (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)) \
->template_info)
/* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or
BOUND_TEMPLATE_TEMPLATE_PARM type. Note that if NODE is a
specialization of an alias template, this accessor returns the
template info for the alias template, not the one (if any) for the
template of the underlying type. */
#define TYPE_TEMPLATE_INFO(NODE) \
((TYPE_ALIAS_P (NODE) && DECL_LANG_SPECIFIC (TYPE_NAME (NODE))) \
? (DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \
? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \
: NULL_TREE) \
: ((TREE_CODE (NODE) == ENUMERAL_TYPE) \
? ENUM_TEMPLATE_INFO (NODE) \
: ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
? TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (NODE) \
: (CLASS_TYPE_P (NODE) \
? CLASSTYPE_TEMPLATE_INFO (NODE) \
: NULL_TREE))))
/* Set the template information for an ENUMERAL_, RECORD_, or
UNION_TYPE to VAL. */
#define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
? (ENUM_TEMPLATE_INFO (NODE) = (VAL)) \
: ((CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \
? (CLASSTYPE_TEMPLATE_INFO (NODE) = (VAL)) \
: (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL))))
#define TI_TEMPLATE(NODE) TREE_TYPE (TEMPLATE_INFO_CHECK (NODE))
#define TI_ARGS(NODE) TREE_CHAIN (TEMPLATE_INFO_CHECK (NODE))
#define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE)
/* For a given TREE_VEC containing a template argument list,
this property contains the number of arguments that are not
defaulted. */
#define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) TREE_CHAIN (TREE_VEC_CHECK (NODE))
/* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT
property. */
#define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE)
#ifdef ENABLE_CHECKING
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE))
#else
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \
? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \
: TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE))
#endif
/* The list of typedefs - used in the template - that need
access checking at template instantiation time. */
#define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \
((struct tree_template_info*)TEMPLATE_INFO_CHECK \
(NODE))->typedefs_needing_access_checking
/* We use TREE_VECs to hold template arguments. If there is only one
level of template arguments, then the TREE_VEC contains the
arguments directly. If there is more than one level of template
arguments, then each entry in the TREE_VEC is itself a TREE_VEC,
containing the template arguments for a single level. The first
entry in the outer TREE_VEC is the outermost level of template
parameters; the last is the innermost.
It is incorrect to ever form a template argument vector containing
only one level of arguments, but which is a TREE_VEC containing as
its only entry the TREE_VEC for that level.
For each TREE_VEC containing the template arguments for a single
level, it's possible to get or set the number of non defaulted
template arguments by using the accessor macros
GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or
SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT. */
/* Nonzero if the template arguments are actually a vector of vectors,
rather than just a vector. */
#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \
(NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \
&& TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC)
/* The depth of a template argument vector. When called directly by
the parser, we use a TREE_LIST rather than a TREE_VEC to represent
template arguments. In fact, we may even see NULL_TREE if there
are no template arguments. In both of those cases, there is only
one level of template arguments. */
#define TMPL_ARGS_DEPTH(NODE) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1)
/* The LEVELth level of the template ARGS. The outermost level of
args is level 1, not level 0. */
#define TMPL_ARGS_LEVEL(ARGS, LEVEL) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \
? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS))
/* Set the LEVELth level of the template ARGS to VAL. This macro does
not work with single-level argument vectors. */
#define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \
(TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL))
/* Accesses the IDXth parameter in the LEVELth level of the ARGS. */
#define TMPL_ARG(ARGS, LEVEL, IDX) \
(TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX))
/* Given a single level of template arguments in NODE, return the
number of arguments. */
#define NUM_TMPL_ARGS(NODE) \
(TREE_VEC_LENGTH (NODE))
/* Returns the innermost level of template arguments in ARGS. */
#define INNERMOST_TEMPLATE_ARGS(NODE) \
(get_innermost_template_args ((NODE), 1))
/* The number of levels of template parameters given by NODE. */
#define TMPL_PARMS_DEPTH(NODE) \
((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE)))
/* The TEMPLATE_DECL instantiated or specialized by NODE. This
TEMPLATE_DECL will be the immediate parent, not the most general
template. For example, in:
template <class T> struct S { template <class U> void f(U); }
the FUNCTION_DECL for S<int>::f<double> will have, as its
DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'.
As a special case, for a member friend template of a template
class, this value will not be a TEMPLATE_DECL, but rather an
IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
any explicit template arguments provided. For example, in:
template <class T> struct S { friend void f<int>(int, double); }
the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
DECL_TI_ARGS will be {int}.
For a FIELD_DECL with a non-static data member initializer, this value
is the FIELD_DECL it was instantiated from. */
#define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))
/* The template arguments used to obtain this decl from the most
general form of DECL_TI_TEMPLATE. For the example given for
DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}. These
are always the full set of arguments required to instantiate this
declaration from the most general template specialized here. */
#define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE))
/* The TEMPLATE_DECL associated with NODE, a class type. Even if NODE
will be generated from a partial specialization, the TEMPLATE_DECL
referred to here will be the original template. For example,
given:
template <typename T> struct S {};
template <typename T> struct S<T*> {};
the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not the S<T*>. */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
#define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))
/* For a template instantiation TYPE, returns the TYPE corresponding
to the primary template. Otherwise returns TYPE itself. */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \
((CLASSTYPE_USE_TEMPLATE ((TYPE)) \
&& !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \
? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \
(CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
: (TYPE))
/* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs. */
#define TYPE_TI_TEMPLATE(NODE) \
(TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))
/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */
#define TYPE_TI_ARGS(NODE) \
(TI_ARGS (TYPE_TEMPLATE_INFO (NODE)))
#define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE)
/* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the
sense of [temp.mem]. */
#define DECL_MEMBER_TEMPLATE_P(NODE) \
(DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE)))
/* Nonzero if the NODE corresponds to the template parameters for a
member template, whose inline definition is being processed after
the class definition is complete. */
#define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE)
/* Determine if a parameter (i.e., a PARM_DECL) is a function
parameter pack. */
#define FUNCTION_PARAMETER_PACK_P(NODE) \
(DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE)))
/* Determines if NODE is an expansion of one or more parameter packs,
e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_P(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
|| TREE_CODE (NODE) == EXPR_PACK_EXPANSION)
/* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or
EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_PATTERN(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or
   EXPR_PACK_EXPANSION. A type pack expansion stores its pattern in
   TREE_TYPE; an expression pack expansion stores it in operand 0.
   Wrapped in do { } while (0) so the macro behaves as a single
   statement (no dangling-else hazard at use sites); VALUE is
   parenthesized so comma expressions cannot mis-parse. */
#define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \
do { \
if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \
TREE_TYPE (NODE) = (VALUE); \
else \
TREE_OPERAND (NODE, 0) = (VALUE); \
} while (0)
/* The list of parameter packs used in the PACK_EXPANSION_* node. The
TREE_VALUE of each TREE_LIST contains the parameter packs. */
#define PACK_EXPANSION_PARAMETER_PACKS(NODE) \
*(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \
? &TREE_OPERAND (NODE, 1) \
: &TYPE_MINVAL (TYPE_PACK_EXPANSION_CHECK (NODE)))
/* Any additional template args to be applied when substituting into
the pattern, set by tsubst_pack_expansion for partial instantiations. */
#define PACK_EXPANSION_EXTRA_ARGS(NODE) \
*(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
? &TYPE_MAXVAL (NODE) \
: &TREE_OPERAND ((NODE), 2))
/* True iff this pack expansion is within a function context. */
#define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE)
/* Determine if this is an argument pack. */
#define ARGUMENT_PACK_P(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \
|| TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK)
/* The arguments stored in an argument pack. Arguments are stored in a
TREE_VEC, which may have length zero. */
#define ARGUMENT_PACK_ARGS(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Set the arguments stored in an argument pack. VALUE must be a
   TREE_VEC. A type argument pack stores its arguments in TREE_TYPE;
   a nontype argument pack stores them in operand 0. Wrapped in
   do { } while (0) so the macro expands to a single statement (no
   dangling-else hazard); VALUE is parenthesized for safety. */
#define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \
do { \
if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \
TREE_TYPE (NODE) = (VALUE); \
else \
TREE_OPERAND (NODE, 0) = (VALUE); \
} while (0)
/* Whether the argument pack is "incomplete", meaning that more
arguments can still be deduced. Incomplete argument packs are only
used when the user has provided an explicit template argument list
for a variadic function template. Some of the explicit template
arguments will be placed into the beginning of the argument pack,
but additional arguments might still be deduced. */
#define ARGUMENT_PACK_INCOMPLETE_P(NODE) \
TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE))
/* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template
arguments used to fill this pack. */
#define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \
TREE_TYPE (ARGUMENT_PACK_ARGS (NODE))
/* In an ARGUMENT_PACK_SELECT, the argument pack from which an
argument will be selected. */
#define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack)
/* In an ARGUMENT_PACK_SELECT, the index of the argument we want to
select. */
#define ARGUMENT_PACK_SELECT_INDEX(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index)
/* In an ARGUMENT_PACK_SELECT, the actual underlying argument that the
   ARGUMENT_PACK_SELECT represents. Note: no trailing semicolon in the
   expansion — the macro must remain usable as an expression (the
   original definition ended with a stray `;', which broke any use
   other than as a full expression-statement). */
#define ARGUMENT_PACK_SELECT_ARG(NODE) \
TREE_VEC_ELT (ARGUMENT_PACK_ARGS (ARGUMENT_PACK_SELECT_FROM_PACK (NODE)), \
ARGUMENT_PACK_SELECT_INDEX (NODE))
/* In a FUNCTION_DECL, the saved language-specific per-function data. */
#define DECL_SAVED_FUNCTION_DATA(NODE) \
(LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \
->u.saved_language_function)
/* True if NODE is an implicit INDIRECT_EXPR from convert_from_reference. */
#define REFERENCE_REF_P(NODE) \
(TREE_CODE (NODE) == INDIRECT_REF \
&& TREE_TYPE (TREE_OPERAND (NODE, 0)) \
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND ((NODE), 0))) \
== REFERENCE_TYPE))
#define NEW_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_VEC(NODE) \
TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE))
/* Indicates that this is a non-dependent COMPOUND_EXPR which will
resolve to a function call. */
#define COMPOUND_EXPR_OVERLOADED(NODE) \
TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE))
/* In a CALL_EXPR appearing in a template, true if Koenig lookup
should be performed at instantiation time. */
#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE))
/* Indicates whether a string literal has been parenthesized. Such
usages are disallowed in certain circumstances. */
#define PAREN_STRING_LITERAL_P(NODE) \
TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE))
/* Nonzero if this AGGR_INIT_EXPR provides for initialization via a
constructor call, rather than an ordinary function call. */
#define AGGR_INIT_VIA_CTOR_P(NODE) \
TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE))
/* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize
the object. */
#define AGGR_INIT_ZERO_FIRST(NODE) \
TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE))
/* AGGR_INIT_EXPR accessors. These are equivalent to the CALL_EXPR
accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of
CALL_EXPR_STATIC_CHAIN). */
#define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1)
#define AGGR_INIT_EXPR_SLOT(NODE) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2)
#define AGGR_INIT_EXPR_ARG(NODE, I) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3)
#define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3)
/* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE.
We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if
the argument count is zero when checking is enabled. Instead, do
the pointer arithmetic to advance past the 3 fixed operands in a
AGGR_INIT_EXPR. That produces a valid pointer to just past the end of
the operand array, even if it's not valid to dereference it. */
#define AGGR_INIT_EXPR_ARGP(NODE) \
(&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3)
/* Abstract iterators for AGGR_INIT_EXPRs. */
/* Structure containing iterator state for walking the arguments of an
   AGGR_INIT_EXPR; see FOR_EACH_AGGR_INIT_EXPR_ARG. */
typedef struct aggr_init_expr_arg_iterator_d {
tree t; /* the AGGR_INIT_EXPR being iterated over */
int n; /* total argument count, from aggr_init_expr_nargs */
int i; /* index of the next argument to return */
} aggr_init_expr_arg_iterator;
/* Make ITER walk the argument list of AGGR_INIT_EXPR node EXP,
   starting from the first argument.  */
static inline void
init_aggr_init_expr_arg_iterator (tree exp,
                                  aggr_init_expr_arg_iterator *iter)
{
  iter->i = 0;
  iter->n = aggr_init_expr_nargs (exp);
  iter->t = exp;
}
/* Fetch the argument ITER currently points at and advance the
   iterator. Yields NULL_TREE once every argument has been consumed. */
static inline tree
next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter)
{
  if (iter->i < iter->n)
    {
      tree arg = AGGR_INIT_EXPR_ARG (iter->t, iter->i);
      iter->i++;
      return arg;
    }
  return NULL_TREE;
}
/* Prime ITER from AGGR_INIT_EXPR node EXP, then hand back its first
   argument (NULL_TREE if EXP has none). Convenient in for-loops:
   for (arg = first_aggr_init_expr_arg (exp, &iter); arg;
   arg = next_aggr_init_expr_arg (&iter)) */
static inline tree
first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter)
{
  /* Equivalent to init_aggr_init_expr_arg_iterator followed by one
     step of the iterator.  */
  iter->t = exp;
  iter->n = aggr_init_expr_nargs (exp);
  iter->i = 0;
  return next_aggr_init_expr_arg (iter);
}
/* Nonzero as long as ITER still has arguments left to produce.
   Does not modify the iterator.  */
static inline bool
more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
{
  return iter->n > iter->i;
}
/* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable
ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */
#define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \
for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \
(arg) = next_aggr_init_expr_arg (&(iter)))
/* VEC_INIT_EXPR accessors. */
#define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0)
#define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1)
/* Indicates that a VEC_INIT_EXPR is a potential constant expression.
Only set when the current function is constexpr. */
#define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \
TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE))
/* Indicates that a VEC_INIT_EXPR is expressing value-initialization. */
#define VEC_INIT_EXPR_VALUE_INIT(NODE) \
TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE))
/* The condition under which this MUST_NOT_THROW_EXPR actually blocks
exceptions. NULL_TREE means 'true'. */
#define MUST_NOT_THROW_COND(NODE) \
TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1)
/* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a
TEMPLATE_DECL. This macro determines whether or not a given class
type is really a template type, as opposed to an instantiation or
specialization of one. */
#define CLASSTYPE_IS_TEMPLATE(NODE) \
(CLASSTYPE_TEMPLATE_INFO (NODE) \
&& !CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
/* The name used by the user to name the typename type. Typically,
this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the
corresponding TYPE_DECL. However, this may also be a
TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */
#define TYPENAME_TYPE_FULLNAME(NODE) \
(TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as an "enum". */
#define TYPENAME_IS_ENUM_P(NODE) \
(TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as a "class", "struct", or
"union". */
#define TYPENAME_IS_CLASS_P(NODE) \
(TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE is in the process of being resolved. */
#define TYPENAME_IS_RESOLVING_P(NODE) \
(TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE)))
/* [class.virtual]
A class that declares or inherits a virtual function is called a
polymorphic class. */
#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE))
/* Nonzero if this class has a virtual function table pointer. */
#define TYPE_CONTAINS_VPTR_P(NODE) \
(TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE))
/* This flag is true of a local VAR_DECL if it was declared in a for
statement, but we are no longer in the scope of the for. */
#define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE))
/* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL
if we already emitted a warning about using it. */
#define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))
/* Nonzero if NODE is a FUNCTION_DECL (for a function with global
scope) declared in a local scope. */
#define DECL_LOCAL_FUNCTION_P(NODE) \
DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE))
/* True if NODE was declared with auto in its return type, but it has
started compilation and so the return type might have been changed by
return type deduction; its declared return type should be found in
DECL_STRUCT_FUNCTION(NODE)->language->x_auto_return_pattern. */
#define FNDECL_USED_AUTO(NODE) \
TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is a DECL which we know about but which has not
been explicitly declared, such as a built-in function or a friend
declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P
will be set. */
#define DECL_ANTICIPATED(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.anticipated_p)
/* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend
within a class but has not been declared in the surrounding scope.
The function is invisible except via argument dependent lookup. */
#define DECL_HIDDEN_FRIEND_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p)
/* Nonzero if DECL has been declared threadprivate by
#pragma omp threadprivate. */
#define CP_DECL_THREADPRIVATE_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p)
/* Nonzero if DECL was declared with '= delete'. */
#define DECL_DELETED_FN(DECL) \
(DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p)
/* Nonzero if DECL was declared with '= default' (maybe implicitly). */
#define DECL_DEFAULTED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->defaulted_p)
/* Nonzero if DECL is explicitly defaulted in the class body. */
#define DECL_DEFAULTED_IN_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL))
/* Nonzero if DECL was defaulted outside the class body. */
#define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) \
&& !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL)))
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* Returns nonzero if DECL has external linkage, as specified by the
language standard. (This predicate may hold even when the
corresponding entity is not actually given external linkage in the
object file; see decl_linkage for details.) */
#define DECL_EXTERNAL_LINKAGE_P(DECL) \
(decl_linkage (DECL) == lk_external)
/* Keep these codes in ascending code order. */
#define INTEGRAL_CODE_P(CODE) \
((CODE) == ENUMERAL_TYPE \
|| (CODE) == BOOLEAN_TYPE \
|| (CODE) == INTEGER_TYPE)
/* [basic.fundamental]
Types bool, char, wchar_t, and the signed and unsigned integer types
are collectively called integral types.
Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration
types as well, which is incorrect in C++. Keep these checks in
ascending code order. */
#define CP_INTEGRAL_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == BOOLEAN_TYPE \
|| TREE_CODE (TYPE) == INTEGER_TYPE)
/* Returns true if TYPE is an integral or enumeration name. Keep
these checks in ascending code order. */
#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE))
/* Returns true if TYPE is an integral or unscoped enumeration type. */
#define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \
(UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE))
/* True if the class type TYPE is a literal type. */
#define CLASSTYPE_LITERAL_P(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->is_literal)
/* [basic.fundamental]
Integral and floating types are collectively called arithmetic
types.
As a GNU extension, we also accept complex types.
Keep these checks in ascending code order. */
#define ARITHMETIC_TYPE_P(TYPE) \
(CP_INTEGRAL_TYPE_P (TYPE) \
|| TREE_CODE (TYPE) == REAL_TYPE \
|| TREE_CODE (TYPE) == COMPLEX_TYPE)
/* True iff TYPE is cv decltype(nullptr). */
#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE)
/* [basic.types]
Arithmetic types, enumeration types, pointer types,
pointer-to-member types, and std::nullptr_t are collectively called
scalar types.
Keep these checks in ascending code order. */
#define SCALAR_TYPE_P(TYPE) \
(TYPE_PTRDATAMEM_P (TYPE) \
|| TREE_CODE (TYPE) == ENUMERAL_TYPE \
|| ARITHMETIC_TYPE_P (TYPE) \
|| TYPE_PTR_P (TYPE) \
|| TYPE_PTRMEMFUNC_P (TYPE) \
|| NULLPTR_TYPE_P (TYPE))
/* Determines whether this type is a C++0x scoped enumeration
type. Scoped enumerations types are introduced via "enum class" or
"enum struct", e.g.,
enum class Color {
Red, Green, Blue
};
Scoped enumeration types are different from normal (unscoped)
enumeration types in several ways:
- The enumerators of a scoped enumeration type are only available
within the scope of the enumeration type and not in the
enclosing scope. For example, the Red color can be referred to
with "Color::Red" but not "Red".
- Scoped enumerators and enumerations do not implicitly convert
to integers or 'bool'.
- The underlying type of the enum is well-defined. */
#define SCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE))
/* Determine whether this is an unscoped enumeration type. */
#define UNSCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE))
/* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped
enumeration type (1) or a normal (unscoped) enumeration type
(0). */
#define SET_SCOPED_ENUM_P(TYPE, VAL) \
(ENUM_IS_SCOPED (TYPE) = (VAL))
#define SET_OPAQUE_ENUM_P(TYPE, VAL) \
(ENUM_IS_OPAQUE (TYPE) = (VAL))
#define OPAQUE_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE))
/* Determines whether an ENUMERAL_TYPE has an explicit
underlying type. */
#define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE))
/* Returns the underlying type of the given enumeration type. The
underlying type is determined in different ways, depending on the
properties of the enum:
- In C++0x, the underlying type can be explicitly specified, e.g.,
enum E1 : char { ... } // underlying type is char
- In a C++0x scoped enumeration, the underlying type is int
unless otherwise specified:
enum class E2 { ... } // underlying type is int
- Otherwise, the underlying type is determined based on the
values of the enumerators. In this case, the
ENUM_UNDERLYING_TYPE will not be set until after the definition
of the enumeration is completed by finish_enum. */
#define ENUM_UNDERLYING_TYPE(TYPE) \
TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE))
/* [dcl.init.aggr]
An aggregate is an array or a class with no user-provided
constructors, no brace-or-equal-initializers for non-static data
members, no private or protected non-static data members, no
base classes, and no virtual functions.
As an extension, we also treat vectors as aggregates. Keep these
checks in ascending code order. */
#define CP_AGGREGATE_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == VECTOR_TYPE \
|| TREE_CODE (TYPE) == ARRAY_TYPE \
|| (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE)))
/* Nonzero for a class type means that the class type has a
user-declared constructor. */
#define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE))
/* When appearing in an INDIRECT_REF, it means that the tree structure
underneath is actually a call to a constructor. This is needed
when the constructor must initialize local storage (which can
be automatically destroyed), rather than allowing it to allocate
space from the heap.
When appearing in a SAVE_EXPR, it means that underneath
is a call to a constructor.
When appearing in a CONSTRUCTOR, the expression is a
compound literal.
When appearing in a FIELD_DECL, it means that this field
has been duly initialized in its constructor. */
#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE))
/* True if NODE is a brace-enclosed initializer. */
#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node)
/* True if NODE is a compound-literal, i.e., a brace-enclosed
initializer cast to a particular type. */
#define COMPOUND_LITERAL_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))
#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
&& vec_safe_is_empty(CONSTRUCTOR_ELTS(NODE))\
&& !TREE_HAS_CONSTRUCTOR (NODE))
/* True if NODE is a init-list used as a direct-initializer, i.e.
B b{1,2}, not B b({1,2}) or B b = {1,2}. */
#define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE)))
/* True if NODE represents a conversion for direct-initialization in a
template. Set by perform_implicit_conversion_flags. */
#define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \
(TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
/* Nonzero means that an object of this type cannot be initialized using
an initializer list. */
#define CLASSTYPE_NON_AGGREGATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate)
#define TYPE_NON_AGGREGATE_CLASS(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE))
/* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign)
/* Nonzero if there is a non-trivial X::X(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor)
/* Nonzero if there is a non-trivial X::op=(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign)
/* Nonzero if there is a non-trivial X::X(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor)
/* Nonzero if there is a non-trivial default constructor for this class. */
#define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt)
/* Nonzero if TYPE has a trivial destructor. From [class.dtor]:
A destructor is trivial if it is an implicitly declared
destructor and if:
- all of the direct base classes of its class have trivial
destructors,
- for all of the non-static data members of its class that are
of class type (or array thereof), each such class has a
trivial destructor. */
#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
(!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))
/* Nonzero for _TYPE node means that this type does not have a trivial
destructor. Therefore, destroying an object of this type will
involve a call to a destructor. This can apply to objects of
ARRAY_TYPE if the type of the elements needs a destructor. */
#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \
(TYPE_LANG_FLAG_4 (NODE))
/* Nonzero for class type means that the default constructor is trivial. */
#define TYPE_HAS_TRIVIAL_DFLT(NODE) \
(TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE))
/* Nonzero for class type means that copy initialization of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \
(TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE))
/* Nonzero for class type means that assignment of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \
(TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE))
/* Returns true if NODE is a pointer-to-data-member. */
#define TYPE_PTRDATAMEM_P(NODE) \
(TREE_CODE (NODE) == OFFSET_TYPE)
/* Returns true if NODE is a pointer. */
#define TYPE_PTR_P(NODE) \
(TREE_CODE (NODE) == POINTER_TYPE)
/* Returns true if NODE is an object type:
[basic.types]
An object type is a (possibly cv-qualified) type that is not a
function type, not a reference type, and not a void type.
Keep these checks in ascending order, for speed. */
#define TYPE_OBJ_P(NODE) \
(TREE_CODE (NODE) != REFERENCE_TYPE \
&& TREE_CODE (NODE) != VOID_TYPE \
&& TREE_CODE (NODE) != FUNCTION_TYPE \
&& TREE_CODE (NODE) != METHOD_TYPE)
/* Returns true if NODE is a pointer to an object. Keep these checks
in ascending tree code order. */
#define TYPE_PTROB_P(NODE) \
(TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a reference to an object. Keep these checks
in ascending tree code order. */
#define TYPE_REF_OBJ_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a pointer to an object, or a pointer to
void. Keep these checks in ascending tree code order. */
#define TYPE_PTROBV_P(NODE) \
(TYPE_PTR_P (NODE) \
&& !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \
|| TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE))
/* Returns true if NODE is a pointer to function. */
#define TYPE_PTRFN_P(NODE) \
(TREE_CODE (NODE) == POINTER_TYPE \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a reference to function. */
#define TYPE_REFFN_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Nonzero for _TYPE node means that this type is a pointer to member
function type. */
#define TYPE_PTRMEMFUNC_P(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_LANG_SPECIFIC (NODE) \
&& TYPE_PTRMEMFUNC_FLAG (NODE))
#define TYPE_PTRMEMFUNC_FLAG(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->ptrmemfunc_flag)
/* Returns true if NODE is a pointer-to-member. */
#define TYPE_PTRMEM_P(NODE) \
(TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE))
/* Returns true if NODE is a pointer or a pointer-to-member. */
#define TYPE_PTR_OR_PTRMEM_P(NODE) \
(TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE))
/* Indicates when overload resolution may resolve to a pointer to
member function. [expr.unary.op]/3 */
#define PTRMEM_OK_P(NODE) \
TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF))
/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true,
before using this macro. */
#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \
(TREE_TYPE (TYPE_FIELDS (NODE)))
/* Returns `A' for a type like `int (A::*)(double)' */
#define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \
TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* These are use to manipulate the canonical RECORD_TYPE from the
hashed POINTER_TYPE, and can only be used on the POINTER_TYPE. */
#define TYPE_GET_PTRMEMFUNC_TYPE(NODE) \
(TYPE_LANG_SPECIFIC (NODE) ? LANG_TYPE_PTRMEM_CHECK (NODE)->record : NULL)
#define TYPE_SET_PTRMEMFUNC_TYPE(NODE, VALUE) \
do { \
if (TYPE_LANG_SPECIFIC (NODE) == NULL) \
{ \
TYPE_LANG_SPECIFIC (NODE) = ggc_alloc_cleared_lang_type \
(sizeof (struct lang_type_ptrmem)); \
TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.h.is_lang_type_class = 0; \
} \
TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.record = (VALUE); \
} while (0)
/* For a pointer-to-member type of the form `T X::*', this is `X'.
   For a type like `void (X::*)() const', this type is `X', not `const
   X'.  To get at the `const X' you have to look at the
   TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have
   type `const X*'.  */
#define TYPE_PTRMEM_CLASS_TYPE(NODE) \
  (TYPE_PTRDATAMEM_P (NODE) \
   ? TYPE_OFFSET_BASETYPE (NODE) \
   : TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE))
/* For a pointer-to-member type of the form `T X::*', this is `T'.  */
#define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \
  (TYPE_PTRDATAMEM_P (NODE) \
   ? TREE_TYPE (NODE) \
   : TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for
   `X'.  */
#define PTRMEM_CST_CLASS(NODE) \
  TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE)))
/* For a pointer-to-member constant `X::Y' this is the _DECL for
   `Y'.  */
#define PTRMEM_CST_MEMBER(NODE) (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member)
/* The expression in question for a TYPEOF_TYPE.  */
#define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE)))
/* The type in question for an UNDERLYING_TYPE.  */
#define UNDERLYING_TYPE_TYPE(NODE) \
  (TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE)))
/* The type in question for BASES.  */
#define BASES_TYPE(NODE) \
  (TYPE_VALUES_RAW (BASES_CHECK (NODE)))
/* Flag on a BASES node; presumably set when only direct bases are
   requested -- NOTE(review): confirm against the uses in semantics.c.  */
#define BASES_DIRECT(NODE) \
  TREE_LANG_FLAG_0 (BASES_CHECK (NODE))
/* The expression in question for a DECLTYPE_TYPE.  */
#define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE)))
/* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an
   id-expression or a member-access expression. When false, it was
   parsed as a full expression.  */
#define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \
  (DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag
/* These flags indicate that we want different semantics from normal
   decltype: lambda capture just drops references, lambda proxies look
   through implicit dereference.  */
#define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \
  TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \
  TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was
   specified in its declaration.  This can also be set for an
   erroneously declared PARM_DECL.  */
#define DECL_THIS_EXTERN(NODE) \
  DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was
   specified in its declaration.  This can also be set for an
   erroneously declared PARM_DECL.  */
#define DECL_THIS_STATIC(NODE) \
  DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a base class
   of the parent object, as opposed to a member field.  */
#define DECL_FIELD_IS_BASE(NODE) \
  DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a simple (no
   explicit initializer) lambda capture field, making it invisible to
   name lookup in unevaluated contexts.  */
#define DECL_NORMAL_CAPTURE_P(NODE) \
  DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE))
/* Nonzero if TYPE is an anonymous union or struct type.  We have to use a
   flag for this because "A union for which objects or pointers are
   declared is not an anonymous union" [class.union].  */
#define ANON_AGGR_TYPE_P(NODE) \
  (CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr)
#define SET_ANON_AGGR_TYPE_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1)
/* Nonzero if TYPE is an anonymous union type.  */
#define ANON_UNION_TYPE_P(NODE) \
  (TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE))
/* Define fields and accessors for nodes representing declared names.  */
/* Nonzero if the type was declared without a name -- NOTE(review): the
   flag name suggests the type was originally anonymous; confirm with the
   setter's caller.  */
#define TYPE_WAS_ANONYMOUS(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous)
/* C++: all of these are overloaded!  These apply only to TYPE_DECLs.  */
/* The format of each node in the DECL_FRIENDLIST is as follows:
   The TREE_PURPOSE will be the name of a function, i.e., an
   IDENTIFIER_NODE.  The TREE_VALUE will be itself a TREE_LIST, whose
   TREE_VALUEs are friends with the given name.  */
#define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE))
#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST))
#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST))
/* The DECL_ACCESS, if non-NULL, is a TREE_LIST.  The TREE_PURPOSE of
   each node is a type; the TREE_VALUE is the access granted for this
   DECL in that type.  The DECL_ACCESS is set by access declarations.
   For example, if a member that would normally be public in a
   derived class is made protected, then the derived class and the
   protected_access_node will appear in the DECL_ACCESS for the node.  */
#define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access)
/* Nonzero if the FUNCTION_DECL is a global constructor.  */
#define DECL_GLOBAL_CTOR_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->global_ctor_p)
/* Nonzero if the FUNCTION_DECL is a global destructor.  */
#define DECL_GLOBAL_DTOR_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->global_dtor_p)
/* Accessor macros for C++ template decl nodes.  */
/* The DECL_TEMPLATE_PARMS are a list.  The TREE_PURPOSE of each node
   is an INT_CST whose TREE_INT_CST_LOW indicates the level of the
   template parameters, with 1 being the outermost set of template
   parameters.  The TREE_VALUE is a vector, whose elements are the
   template parameters at each level.  Each element in the vector is a
   TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a
   non-type parameter), or a TYPE_DECL (if the parameter is a type
   parameter).  The TREE_PURPOSE is the default value, if any.  The
   TEMPLATE_PARM_INDEX for the parameter is available as the
   DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a
   TYPE_DECL).  */
#define DECL_TEMPLATE_PARMS(NODE) DECL_NON_COMMON_CHECK (NODE)->decl_non_common.arguments
/* The innermost parameter list (a TREE_VEC) of the template.  */
#define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \
   INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE))
/* The number of parameters in the innermost template parameter list.  */
#define DECL_NTPARMS(NODE) \
   TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE))
/* For function, method, class-data templates.  */
#define DECL_TEMPLATE_RESULT(NODE) DECL_RESULT_FLD (NODE)
/* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS
   lists all instantiations and specializations of the function so that
   tsubst_friend_function can reassign them to another template if we find
   that the namespace-scope template is really a partial instantiation of a
   friend template.
   For a class template the DECL_TEMPLATE_INSTANTIATIONS list holds
   all instantiations and specializations of the class type, including
   partial instantiations and partial specializations, so that if we
   explicitly specialize a partial instantiation we can walk the list
   in maybe_process_partial_specialization and reassign them or complain
   as appropriate.
   In both cases, the TREE_PURPOSE of each node contains the arguments
   used; the TREE_VALUE contains the generated variable.  The template
   arguments are always complete.  For example, given:
      template <class T> struct S1 {
        template <class U> struct S2 {};
        template <class U> struct S2<U*> {};
      };
   the record for the partial specialization will contain, as its
   argument list, { {T}, {U*} }, and will be on the
   DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template
   <class U> struct S1<T>::S2'.
   This list is not used for other templates.  */
#define DECL_TEMPLATE_INSTANTIATIONS(NODE) DECL_VINDEX (NODE)
/* For a class template, this list contains the partial
   specializations of this template.  (Full specializations are not
   recorded on this list.)  The TREE_PURPOSE holds the arguments used
   in the partial specialization (e.g., for `template <class T> struct
   S<T*, int>' this will be `T*'.)  The arguments will also include
   any outer template arguments.  The TREE_VALUE holds the innermost
   template parameters for the specialization (e.g., `T' in the
   example above.)  The TREE_TYPE is the _TYPE node for the partial
   specialization.
   This list is not used for other templates.  */
#define DECL_TEMPLATE_SPECIALIZATIONS(NODE) DECL_SIZE (NODE)
/* Nonzero for a DECL which is actually a template parameter.  Keep
   these checks in ascending tree code order.   */
#define DECL_TEMPLATE_PARM_P(NODE) \
  (DECL_LANG_FLAG_0 (NODE) \
   && (TREE_CODE (NODE) == CONST_DECL \
       || TREE_CODE (NODE) == PARM_DECL \
       || TREE_CODE (NODE) == TYPE_DECL \
       || TREE_CODE (NODE) == TEMPLATE_DECL))
/* Mark NODE as a template parameter.  */
#define SET_DECL_TEMPLATE_PARM_P(NODE) \
  (DECL_LANG_FLAG_0 (NODE) = 1)
/* Nonzero if NODE is a template template parameter.  */
#define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \
  (TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE))
/* Nonzero if NODE is a TEMPLATE_DECL representing an
   UNBOUND_CLASS_TEMPLATE tree node.  */
#define DECL_UNBOUND_CLASS_TEMPLATE_P(NODE) \
  (TREE_CODE (NODE) == TEMPLATE_DECL && !DECL_TEMPLATE_RESULT (NODE))
/* Nonzero for a TEMPLATE_DECL whose result is a FUNCTION_DECL, i.e. a
   function template.  */
#define DECL_FUNCTION_TEMPLATE_P(NODE) \
  (TREE_CODE (NODE) == TEMPLATE_DECL \
   && !DECL_UNBOUND_CLASS_TEMPLATE_P (NODE) \
   && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL)
/* Nonzero for a DECL that represents a class template or alias
   template.  */
#define DECL_TYPE_TEMPLATE_P(NODE) \
  (TREE_CODE (NODE) == TEMPLATE_DECL \
   && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
   && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL)
/* Nonzero for a DECL that represents a class template.  */
#define DECL_CLASS_TEMPLATE_P(NODE) \
  (DECL_TYPE_TEMPLATE_P (NODE) \
   && DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a TEMPLATE_DECL that represents an alias template.  */
#define DECL_ALIAS_TEMPLATE_P(NODE) \
  (DECL_TYPE_TEMPLATE_P (NODE) \
   && !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a NODE which declares a type.  */
#define DECL_DECLARES_TYPE_P(NODE) \
  (TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE))
/* Nonzero if NODE declares a function.  */
#define DECL_DECLARES_FUNCTION_P(NODE) \
  (TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE))
/* Nonzero if NODE is the typedef implicitly generated for a type when
   the type is declared.  In C++, `struct S {};' is roughly
   equivalent to `struct S {}; typedef struct S S;' in C.
   DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this
   example.  In C++, there is a second implicit typedef for each
   class, in the scope of `S' itself, so that you can say `S::S'.
   DECL_SELF_REFERENCE_P will hold for that second typedef.  */
#define DECL_IMPLICIT_TYPEDEF_P(NODE) \
  (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE))
#define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \
  (DECL_LANG_FLAG_2 (NODE) = 1)
#define DECL_SELF_REFERENCE_P(NODE) \
  (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE))
#define SET_DECL_SELF_REFERENCE_P(NODE) \
  (DECL_LANG_FLAG_4 (NODE) = 1)
/* A `primary' template is one that has its own template header.  A
   member function of a class template is a template, but not primary.
   A member template is primary.  Friend templates are primary, too.  */
/* Returns the primary template corresponding to these parameters.  */
#define DECL_PRIMARY_TEMPLATE(NODE) \
  (TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE)))
/* Returns nonzero if NODE is a primary template.  */
#define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE))
/* Nonzero iff NODE is a specialization of a template.  The value
   indicates the type of specializations:
     1=implicit instantiation
     2=partial or explicit specialization, e.g.:
        template <> int min<int> (int, int),
     3=explicit instantiation, e.g.:
        template int min<int> (int, int);
   Note that NODE will be marked as a specialization even if the
   template it is instantiating is not a primary template.  For
   example, given:
     template <typename T> struct O {
       void f();
       struct I {};
     };
   both O<int>::f and O<int>::I will be marked as instantiations.
   If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also
   be non-NULL.  */
#define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template)
/* Like DECL_USE_TEMPLATE, but for class types.  */
#define CLASSTYPE_USE_TEMPLATE(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->use_template)
/* True if NODE is a specialization of a primary template.  */
#define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \
  (CLASS_TYPE_P (NODE) \
   && CLASSTYPE_USE_TEMPLATE (NODE) \
   && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
/* Nonzero if NODE is an instantiation, either implicit (value 1) or
   explicit (value 3); both have the low bit of DECL_USE_TEMPLATE set.  */
#define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1)
#define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) & 1)
/* Accessors/setters for the individual DECL_USE_TEMPLATE values listed
   above (2 = specialization, 1 = implicit, 3 = explicit).  */
#define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2)
#define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2)
/* Returns true for an explicit or partial specialization of a class
   template.  */
#define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) == 2)
#define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) = 2)
#define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1)
#define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1)
#define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) == 1)
#define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) = 1)
#define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3)
#define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3)
#define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) == 3)
#define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) = 3)
/* Nonzero if DECL is a friend function which is an instantiation
   from the point of view of the compiler, but not from the point of
   view of the language.  For example given:
      template <class T> struct S { friend void f(T) {}; };
   the declaration of `void f(int)' generated when S<int> is
   instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be
   a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION.  */
#define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \
  (DECL_TEMPLATE_INFO (DECL) && !DECL_USE_TEMPLATE (DECL))
/* Nonzero if DECL is a function generated from a function 'temploid',
   i.e. template, member of class template, or dependent friend.  */
#define DECL_TEMPLOID_INSTANTIATION(DECL) \
  (DECL_TEMPLATE_INSTANTIATION (DECL) \
   || DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL))
/* Nonzero if DECL is either defined implicitly by the compiler or
   generated from a temploid.  */
#define DECL_GENERATED_P(DECL) \
  (DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL))
/* Nonzero iff we are currently processing a declaration for an
   entity with its own template parameter list, and which is not a
   full specialization.  */
#define PROCESSING_REAL_TEMPLATE_DECL_P() \
  (processing_template_decl > template_class_depth (current_scope ()))
/* Nonzero if this VAR_DECL or FUNCTION_DECL has already been
   instantiated, i.e. its definition has been generated from the
   pattern given in the template.  */
#define DECL_TEMPLATE_INSTANTIATED(NODE) \
  DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE))
/* We know what we're doing with this decl now.  */
#define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE)
/* DECL_EXTERNAL must be set on a decl until the decl is actually emitted,
   so that assemble_external will work properly.  So we have this flag to
   tell us whether the decl is really not external.
   This flag does not indicate whether or not the decl is defined in the
   current translation unit; it indicates whether or not we should emit the
   decl at the end of compilation if it is defined and needed.  */
#define DECL_NOT_REALLY_EXTERN(NODE) \
  (DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern)
#define DECL_REALLY_EXTERN(NODE) \
  (DECL_EXTERNAL (NODE) && ! DECL_NOT_REALLY_EXTERN (NODE))
/* A thunk is a stub function.
   A thunk is an alternate entry point for an ordinary FUNCTION_DECL.
   The address of the ordinary FUNCTION_DECL is given by the
   DECL_INITIAL, which is always an ADDR_EXPR whose operand is a
   FUNCTION_DECL.  The job of the thunk is to either adjust the this
   pointer before transferring control to the FUNCTION_DECL, or call
   FUNCTION_DECL and then adjust the result value. Note, the result
   pointer adjusting thunk must perform a call to the thunked
   function, (or be implemented via passing some invisible parameter
   to the thunked function, which is modified to perform the
   adjustment just before returning).
   A thunk may perform either, or both, of the following operations:
   o Adjust the this or result pointer by a constant offset.
   o Adjust the this or result pointer by looking up a vcall or vbase offset
     in the vtable.
   A this pointer adjusting thunk converts from a base to a derived
   class, and hence adds the offsets.  A result pointer adjusting thunk
   converts from a derived class to a base, and hence subtracts the
   offsets.  If both operations are performed, then the constant
   adjustment is performed first for this pointer adjustment and last
   for the result pointer adjustment.
   The constant adjustment is given by THUNK_FIXED_OFFSET.  If the
   vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is
   used.  For this pointer adjusting thunks, it is the vcall offset
   into the vtable.  For result pointer adjusting thunks it is the
   binfo of the virtual base to convert to.  Use that binfo's vbase
   offset.
   It is possible to have equivalent covariant thunks.  These are
   distinct virtual covariant thunks whose vbase offsets happen to
   have the same value.  THUNK_ALIAS is used to pick one as the
   canonical thunk, which will get all the this pointer adjusting
   thunks attached to it.  */
/* An integer indicating how many bytes should be subtracted from the
   this or result pointer when this function is called.  */
#define THUNK_FIXED_OFFSET(DECL) \
  (DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset)
/* A tree indicating how to perform the virtual adjustment. For a this
   adjusting thunk it is the number of bytes to be added to the vtable
   to find the vcall offset. For a result adjusting thunk, it is the
   binfo of the relevant virtual base.  If NULL, then there is no
   virtual adjust.  (The vptr is always located at offset zero from
   the this or result pointer.)  (If the covariant type is within the
   class hierarchy being laid out, the vbase index is not yet known
   at the point we need to create the thunks, hence the need to use
   binfos.)  */
#define THUNK_VIRTUAL_OFFSET(DECL) \
  (LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access)
/* A thunk which is equivalent to another thunk.  */
#define THUNK_ALIAS(DECL) \
  (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info)
/* For thunk NODE, this is the FUNCTION_DECL thunked to.  It is
   possible for the target to be a thunk too.  */
#define THUNK_TARGET(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* True for a SCOPE_REF iff the "template" keyword was used to
   indicate that the qualified name denotes a template.  */
#define QUALIFIED_NAME_IS_TEMPLATE(NODE) \
  (TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE)))
/* True for an OMP_ATOMIC that has dependent parameters.  These are stored
   as an expr in operand 1, and integer_zero_node in operand 0.  */
#define OMP_ATOMIC_DEPENDENT_P(NODE) \
  (TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST)
/* Used while gimplifying continue statements bound to OMP_FOR nodes.  */
#define OMP_FOR_GIMPLIFYING_P(NODE) \
  (TREE_LANG_FLAG_0 (OMP_FOR_CHECK (NODE)))
/* A language-specific token attached to the OpenMP data clauses to
   hold code (or code fragments) related to ctors, dtors, and op=.
   See semantics.c for details.  */
#define CP_OMP_CLAUSE_INFO(NODE) \
  TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
				     OMP_CLAUSE_COPYPRIVATE))
/* Nonzero if this transaction expression's body contains statements.  */
#define TRANSACTION_EXPR_IS_STMT(NODE) \
  TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE))
/* These macros provide convenient access to the various _STMT nodes
   created when parsing template declarations.  */
#define TRY_STMTS(NODE)		TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0)
#define TRY_HANDLERS(NODE)	TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1)
#define EH_SPEC_STMTS(NODE)	TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0)
#define EH_SPEC_RAISES(NODE)	TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1)
#define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0)
/* Nonzero if this try block is a function try block.  */
#define FN_TRY_BLOCK_P(NODE)	TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE))
#define HANDLER_PARMS(NODE)	TREE_OPERAND (HANDLER_CHECK (NODE), 0)
#define HANDLER_BODY(NODE)	TREE_OPERAND (HANDLER_CHECK (NODE), 1)
#define HANDLER_TYPE(NODE)	TREE_TYPE (HANDLER_CHECK (NODE))
/* CLEANUP_STMT accessors.  The statement(s) covered, the cleanup to run
   and the VAR_DECL for which this cleanup exists.  */
#define CLEANUP_BODY(NODE)	TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0)
#define CLEANUP_EXPR(NODE)	TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1)
#define CLEANUP_DECL(NODE)	TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2)
/* IF_STMT accessors.  These give access to the condition of the if
   statement, the then block of the if statement, and the else block
   of the if statement if it exists.  */
#define IF_COND(NODE)		TREE_OPERAND (IF_STMT_CHECK (NODE), 0)
#define THEN_CLAUSE(NODE)	TREE_OPERAND (IF_STMT_CHECK (NODE), 1)
#define ELSE_CLAUSE(NODE)	TREE_OPERAND (IF_STMT_CHECK (NODE), 2)
#define IF_SCOPE(NODE)		TREE_OPERAND (IF_STMT_CHECK (NODE), 3)
/* WHILE_STMT accessors.  These give access to the condition of the
   while statement and the body of the while statement, respectively.  */
#define WHILE_COND(NODE)	TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0)
#define WHILE_BODY(NODE)	TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1)
/* DO_STMT accessors.  These give access to the condition of the do
   statement and the body of the do statement, respectively.  */
#define DO_COND(NODE)		TREE_OPERAND (DO_STMT_CHECK (NODE), 0)
#define DO_BODY(NODE)		TREE_OPERAND (DO_STMT_CHECK (NODE), 1)
/* FOR_STMT accessors.  These give access to the init statement,
   condition, update expression, and body of the for statement,
   respectively.  */
#define FOR_INIT_STMT(NODE)	TREE_OPERAND (FOR_STMT_CHECK (NODE), 0)
#define FOR_COND(NODE)		TREE_OPERAND (FOR_STMT_CHECK (NODE), 1)
#define FOR_EXPR(NODE)		TREE_OPERAND (FOR_STMT_CHECK (NODE), 2)
#define FOR_BODY(NODE)		TREE_OPERAND (FOR_STMT_CHECK (NODE), 3)
#define FOR_SCOPE(NODE)		TREE_OPERAND (FOR_STMT_CHECK (NODE), 4)
/* RANGE_FOR_STMT accessors. These give access to the declarator,
   expression, body, and scope of the statement, respectively.  */
#define RANGE_FOR_DECL(NODE)	TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0)
#define RANGE_FOR_EXPR(NODE)	TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1)
#define RANGE_FOR_BODY(NODE)	TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2)
#define RANGE_FOR_SCOPE(NODE)	TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3)
/* SWITCH_STMT accessors.  The condition, body, type, and scope of the
   switch statement, respectively.  */
#define SWITCH_STMT_COND(NODE)	TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0)
#define SWITCH_STMT_BODY(NODE)	TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1)
#define SWITCH_STMT_TYPE(NODE)	TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2)
#define SWITCH_STMT_SCOPE(NODE)	TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3)
/* STMT_EXPR accessor.  */
#define STMT_EXPR_STMT(NODE)	TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0)
/* EXPR_STMT accessor. This gives the expression associated with an
   expression statement.  */
#define EXPR_STMT_EXPR(NODE)	TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0)
/* True if this TARGET_EXPR was created by build_cplus_new, and so we can
   discard it if it isn't useful.  */
#define TARGET_EXPR_IMPLICIT_P(NODE) \
  TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR is the result of list-initialization of a
   temporary.  */
#define TARGET_EXPR_LIST_INIT_P(NODE) \
  TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR expresses direct-initialization of an object
   to be named later.  */
#define TARGET_EXPR_DIRECT_INIT_P(NODE) \
  TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE))
/* True if EXPR expresses direct-initialization of a TYPE.  */
#define DIRECT_INIT_EXPR_P(TYPE,EXPR) \
  (TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \
   && same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR)))
/* True if this CONVERT_EXPR is for a conversion to virtual base in
   an NSDMI, and should be re-evaluated when used in a constructor.  */
#define CONVERT_EXPR_VBASE_PATH(NODE) \
  TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE))
/* True if SIZEOF_EXPR argument is type.  */
#define SIZEOF_EXPR_TYPE_P(NODE) \
  TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE))
/* The class-keys and tag kinds that C++ recognizes.  */
enum tag_types {
  none_type = 0,  /* Not a tag at all.  */
  record_type,    /* Declared with `struct'.  */
  class_type,     /* Declared with `class'.  */
  union_type,     /* Declared with `union'.  */
  enum_type,      /* Declared with `enum'.  */
  typename_type   /* Named via `typename'.  */
};
/* The distinct lvalue categories, represented as bit flags so that a
   single value can describe several properties at once.  */
enum cp_lvalue_kind_flags {
  clk_none = 0,      /* Not an lvalue at all.  */
  clk_ordinary = 1,  /* A plain lvalue.  */
  clk_rvalueref = 2, /* An xvalue (formed via an rvalue reference).  */
  clk_class = 4,     /* A prvalue of class type.  */
  clk_bitfield = 8,  /* An lvalue naming a bit-field.  */
  clk_packed = 16    /* An lvalue naming a packed field.  */
};
/* Holds any bitwise combination of the cp_lvalue_kind_flags above.  */
typedef int cp_lvalue_kind;
/* Classification of a declaration with respect to template
   specialization and instantiation, including the invalid forms the
   parser must diagnose.  */
typedef enum tmpl_spec_kind {
  tsk_none,                /* Not a template at all.  */
  tsk_invalid_member_spec, /* An explicit member template specialization
			      whose enclosing classes have not all been
			      explicitly specialized.  */
  tsk_invalid_expl_inst,   /* An explicit instantiation that carries
			      template parameter lists.  */
  tsk_excessive_parms,     /* A template declaration with too many
			      template parameter lists.  */
  tsk_insufficient_parms,  /* A template declaration with too few
			      parameter lists.  */
  tsk_template,            /* A template declaration.  */
  tsk_expl_spec,           /* An explicit specialization.  */
  tsk_expl_inst            /* An explicit instantiation.  */
} tmpl_spec_kind;
/* Accessibility of a member.  BINFO_ACCESS depends on these fitting in
   two bits.  The numeric values are ABI-significant: they are baked
   into RTTI data structures, so they must not be changed.  */
typedef enum access_kind {
  ak_none = 0,      /* Not accessible.  */
  ak_public = 1,    /* Accessible as `public'.  */
  ak_protected = 2, /* Accessible as `protected'.  */
  ak_private = 3    /* Accessible as `private'.  */
} access_kind;
/* The kinds of special member functions.  Additions here must be
   mirrored in special_function_p.  */
typedef enum special_function_kind {
  sfk_none = 0,               /* Not a special function; must stay zero
				 (see special_function_p).  */
  sfk_constructor,            /* A constructor.  */
  sfk_copy_constructor,       /* A copy constructor.  */
  sfk_move_constructor,       /* A move constructor.  */
  sfk_copy_assignment,        /* A copy assignment operator.  */
  sfk_move_assignment,        /* A move assignment operator.  */
  sfk_destructor,             /* A destructor.  */
  sfk_complete_destructor,    /* A destructor for complete objects.  */
  sfk_base_destructor,        /* A destructor for base subobjects.  */
  sfk_deleting_destructor,    /* A destructor for complete objects that
				 also deletes the object once it has
				 been destroyed.  */
  sfk_conversion,             /* A conversion operator.  */
  sfk_inheriting_constructor  /* An inheriting constructor */
} special_function_kind;
/* The kinds of linkage.  From [basic.link]:
      A name is said to have linkage when it might denote the same
      object, reference, function, type, template, namespace or value
      as a name introduced in another scope:
      -- When a name has external linkage, the entity it denotes can
	 be referred to from scopes of other translation units or from
	 other scopes of the same translation unit.
      -- When a name has internal linkage, the entity it denotes can
	 be referred to by names from other scopes in the same
	 translation unit.
      -- When a name has no linkage, the entity it denotes cannot be
	 referred to by names from other scopes.  */
typedef enum linkage_kind {
  lk_none,     /* No linkage.  */
  lk_internal, /* Internal linkage.  */
  lk_external  /* External linkage.  */
} linkage_kind;
/* Storage-duration categories; names mirror the [basic.stc]
   durations (static, thread, automatic, dynamic).  */
typedef enum duration_kind {
  dk_static,  /* Static storage duration.  */
  dk_thread,  /* Thread storage duration.  */
  dk_auto,    /* Automatic storage duration.  */
  dk_dynamic  /* Dynamic storage duration.  */
} duration_kind;
/* Bit flags controlling how template substitution behaves.  */
enum tsubst_flags {
  tf_none = 0,                  /* Nothing special.  */
  tf_error = 1 << 0,            /* Emit error messages.  */
  tf_warning = 1 << 1,          /* Emit warnings as well.  */
  tf_ignore_bad_quals = 1 << 2, /* Ignore bad cvr qualifiers.  */
  tf_keep_type_decl = 1 << 3,   /* Retain typedef type decls
				   (make_typename_type use).  */
  tf_ptrmem_ok = 1 << 4,        /* Pointers to member ok (internal
				   instantiate_type use).  */
  tf_user = 1 << 5,             /* Found template must be a user template
				   (lookup_template_class use).  */
  tf_conv = 1 << 6,             /* We are only determining what kind of
				   conversion might be permissible, not
				   actually performing it.  */
  tf_decltype = 1 << 7,         /* We are the operand of decltype; used
				   to implement the special rules for
				   calls in decltype (5.2.2/11).  */
  tf_partial = 1 << 8,          /* Doing initial explicit argument
				   substitution in fn_type_unification.  */
  /* Convenient combination of the flags above.  */
  tf_warning_or_error = tf_warning | tf_error
};
/* Holds any bitwise combination of the tsubst_flags above.  */
typedef int tsubst_flags_t;
/* Bit flags selecting how a base-class lookup in a hierarchy is
   checked.  */
enum base_access_flags {
  ba_any = 0,              /* Do not check access; allow an ambiguous
			      base; prefer a non-virtual base.  */
  ba_unique = 1 << 0,      /* The base must be unique.  */
  ba_check_bit = 1 << 1,   /* Check access.  */
  ba_check = ba_unique | ba_check_bit,
  ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope.  */
};
/* Holds any bitwise combination of the base_access_flags above.  */
typedef int base_access;
/* How access checks are handled during parsing.  */
typedef enum deferring_kind {
  dk_no_deferred = 0, /* Perform the access check immediately.  */
  dk_deferred = 1,    /* Defer the access check.  */
  dk_no_check = 2     /* Perform no access check at all.  */
} deferring_kind;
/* Result of searching a class hierarchy for a base.  Negative values
   indicate the search failed.  */
typedef enum base_kind {
  bk_inaccessible = -3, /* The base is inaccessible.  */
  bk_ambig = -2,        /* The base is ambiguous.  */
  bk_not_base = -1,     /* It is not a base at all.  */
  bk_same_type = 0,     /* It is the same type.  */
  bk_proper_base = 1,   /* It is a proper base.  */
  bk_via_virtual = 2    /* It is a proper base, but reached via a
			   virtual path; this might not be the
			   canonical binfo.  */
} base_kind;
/* Node for "pointer to (virtual) function".
   This may be distinct from ptr_type_node so gdb can distinguish them.  */
#define vfunc_ptr_type_node  vtable_entry_type
/* For building calls to `delete'.  */
extern GTY(()) tree integer_two_node;
/* The number of function bodies which we are currently processing.
   (Zero if we are at namespace scope, one inside the body of a
   function, two inside the body of a function in a local class, etc.)  */
extern int function_depth;
/* Nonzero if we are inside eq_specializations, which affects comparison of
   PARM_DECLs in cp_tree_equal.  */
extern int comparing_specializations;
/* In parser.c.  */
/* Nonzero if we are parsing an unevaluated operand: an operand to
   sizeof, typeof, or alignof.  This is a count since operands to
   sizeof can be nested.  */
extern int cp_unevaluated_operand;
/* Lower a C++11 range-based for into an ordinary for statement.  */
extern tree cp_convert_range_for (tree, tree, tree);
/* in pt.c */
/* Values for the `STRICT' parameter of type_unification and
   fn_type_unification; their precise meanings are described with the
   documentation for fn_type_unification.  */
typedef enum unification_kind_t {
  DEDUCE_CALL,  /* Deduction from a function call.  */
  DEDUCE_CONV,  /* Deduction for a conversion.  */
  DEDUCE_EXACT  /* Deduction requiring an exact match.  */
} unification_kind_t;
/* in class.c */
extern int current_class_depth;
/* An array of all local classes present in this translation unit, in
   declaration order.  */
extern GTY(()) vec<tree, va_gc> *local_classes;
/* Here's where we control how name mangling takes place.  */
/* Cannot use '$' up front, because this confuses gdb
   (names beginning with '$' are gdb-local identifiers).
   Note that all forms in which the '$' is significant are long enough
   for direct indexing (meaning that if we know there is a '$'
   at a particular location, we can index into the string at
   any other location that provides distinguishing characters).  */
/* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler
   doesn't allow '.' in symbol names.  */
#ifndef NO_DOT_IN_LABEL
#define JOINER '.'
#define AUTO_TEMP_NAME "_.tmp_"
#define VFIELD_BASE ".vf"
#define VFIELD_NAME "_vptr."
#define VFIELD_NAME_FORMAT "_vptr.%s"
#define ANON_AGGRNAME_FORMAT "._%d"
#else /* NO_DOT_IN_LABEL */
#ifndef NO_DOLLAR_IN_LABEL
#define JOINER '$'
#define AUTO_TEMP_NAME "_$tmp_"
#define VFIELD_BASE "$vf"
#define VFIELD_NAME "_vptr$"
#define VFIELD_NAME_FORMAT "_vptr$%s"
#define ANON_AGGRNAME_FORMAT "$_%d"
#else /* NO_DOLLAR_IN_LABEL */
/* Neither '.' nor '$' is usable, so fall back to '__'-prefixed names
   and recognize them by string prefix instead of by joiner character.  */
#define AUTO_TEMP_NAME "__tmp_"
#define TEMP_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
	     sizeof (AUTO_TEMP_NAME) - 1))
#define VTABLE_NAME "__vt_"
#define VTABLE_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
	     sizeof (VTABLE_NAME) - 1))
#define VFIELD_BASE "__vfb"
#define VFIELD_NAME "__vptr_"
#define VFIELD_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
	     sizeof (VFIELD_NAME) - 1))
#define VFIELD_NAME_FORMAT "__vptr_%s"
#define ANON_AGGRNAME_PREFIX "__anon_"
#define ANON_AGGRNAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), ANON_AGGRNAME_PREFIX, \
	     sizeof (ANON_AGGRNAME_PREFIX) - 1))
#define ANON_AGGRNAME_FORMAT "__anon_%d"
#endif	/* NO_DOLLAR_IN_LABEL */
#endif	/* NO_DOT_IN_LABEL */
/* Fixed identifier spellings used by the front end.  */
#define THIS_NAME "this"
#define IN_CHARGE_NAME "__in_chrg"
#define VTBL_PTR_TYPE "__vtbl_ptr_type"
#define VTABLE_DELTA_NAME "__delta"
#define VTABLE_PFN_NAME "__pfn"
#define LAMBDANAME_PREFIX "__lambda"
#define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d"
/* Spellings for user-defined literal operator names, in source form
   and in mangled form.  */
#define UDLIT_OP_ANSI_PREFIX "operator\"\" "
#define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s"
#define UDLIT_OP_MANGLED_PREFIX "li"
#define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s"
#define UDLIT_OPER_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), \
	     UDLIT_OP_ANSI_PREFIX, \
	     sizeof (UDLIT_OP_ANSI_PREFIX) - 1))
#define UDLIT_OP_SUFFIX(ID_NODE) \
  (IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1)
#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)
#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
&& IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
&& IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)
#define TEMP_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
#define VFIELD_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))
/* For anonymous aggregate types, we need some sort of name to
hold on to. In practice, this should not appear, but it should
not be harmful if it does. */
#define ANON_AGGRNAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[0] == JOINER \
&& IDENTIFIER_POINTER (ID_NODE)[1] == '_')
#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */
/* Nonzero if we're done parsing and into end-of-file activities. */
extern int at_eof;
/* A list of namespace-scope objects which have constructors or
destructors which reside in the global scope. The decl is stored
in the TREE_VALUE slot and the initializer is stored in the
TREE_PURPOSE slot. */
extern GTY(()) tree static_aggregates;
/* Likewise, for thread local storage. */
extern GTY(()) tree tls_aggregates;
enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG };
/* These are used as bits in flags passed to various functions to
control their behavior. Despite the LOOKUP_ prefix, many of these
do not control name lookup. ??? Functions using these flags should
probably be modified to accept explicit boolean flags for the
behaviors relevant to them. */
/* Check for access violations. */
#define LOOKUP_PROTECT (1 << 0)
#define LOOKUP_NORMAL (LOOKUP_PROTECT)
/* Even if the function found by lookup is a virtual function, it
should be called directly. */
#define LOOKUP_NONVIRTUAL (1 << 1)
/* Non-converting (i.e., "explicit") constructors are not tried. This flag
indicates that we are not performing direct-initialization. */
#define LOOKUP_ONLYCONVERTING (1 << 2)
#define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING)
/* If a temporary is created, it should be created so that it lives
as long as the current variable bindings; otherwise it only lives
until the end of the complete-expression. It also forces
direct-initialization in cases where other parts of the compiler
have already generated a temporary, such as reference
initialization and the catch parameter. */
#define DIRECT_BIND (1 << 3)
/* We're performing a user-defined conversion, so more user-defined
conversions are not permitted (only built-in conversions). */
#define LOOKUP_NO_CONVERSION (1 << 4)
/* The user has explicitly called a destructor. (Therefore, we do
not need to check that the object is non-NULL before calling the
destructor.) */
#define LOOKUP_DESTRUCTOR (1 << 5)
/* Do not permit references to bind to temporaries. */
#define LOOKUP_NO_TEMP_BIND (1 << 6)
/* Do not accept objects, and possibly namespaces. */
#define LOOKUP_PREFER_TYPES (1 << 7)
/* Do not accept objects, and possibly types. */
#define LOOKUP_PREFER_NAMESPACES (1 << 8)
/* Accept types or namespaces. */
#define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES)
/* Return friend declarations and un-declared builtin functions.
(Normally, these entities are registered in the symbol table, but
not found by lookup.) */
#define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1)
/* Prefer that the lvalue be treated as an rvalue. */
#define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1)
/* We're inside an init-list, so narrowing conversions are ill-formed. */
#define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1)
/* We're looking up a constructor for list-initialization. */
#define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1)
/* This is the first parameter of a copy constructor. */
#define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1)
/* We only want to consider list constructors. */
#define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1)
/* Return after determining which function to call and checking access.
Used by synthesized_method_walk to determine which functions will
be called to initialize subobjects, in order to determine exception
specification and possible implicit delete.
This is kind of a hack, but exiting early avoids problems with trying
to perform argument conversions when the class isn't complete yet. */
#define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1)
/* Used by calls from defaulted functions to limit the overload set to avoid
cycles trying to declare them (core issue 1092). */
#define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1)
/* Used in calls to store_init_value to suppress its usual call to
digest_init. */
#define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1)
/* An instantiation with explicit template arguments. */
#define LOOKUP_EXPLICIT_TMPL_ARGS (LOOKUP_ALREADY_DIGESTED << 1)
/* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues. */
#define LOOKUP_NO_RVAL_BIND (LOOKUP_EXPLICIT_TMPL_ARGS << 1)
#define LOOKUP_NAMESPACES_ONLY(F) \
(((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_TYPES_ONLY(F) \
(!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_QUALIFIERS_ONLY(F) ((F) & LOOKUP_PREFER_BOTH)
/* These flags are used by the conversion code.
CONV_IMPLICIT : Perform implicit conversions (standard and user-defined).
CONV_STATIC : Perform the explicit conversions for static_cast.
CONV_CONST : Perform the explicit conversions for const_cast.
CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast.
CONV_PRIVATE : Perform upcasts to private bases.
CONV_FORCE_TEMP : Require a new temporary when converting to the same
aggregate type. */
#define CONV_IMPLICIT 1
#define CONV_STATIC 2
#define CONV_CONST 4
#define CONV_REINTERPRET 8
#define CONV_PRIVATE 16
/* #define CONV_NONCONVERTING 32 */
#define CONV_FORCE_TEMP 64
#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET)
#define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP)
/* Used by build_expr_type_conversion to indicate which types are
acceptable as arguments to the expression under consideration. */
#define WANT_INT 1 /* integer types, including bool */
#define WANT_FLOAT 2 /* floating point types */
#define WANT_ENUM 4 /* enumerated types */
#define WANT_POINTER 8 /* pointer types */
#define WANT_NULL 16 /* null pointer constant */
#define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */
#define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX)
/* Used with comptypes, and related functions, to guide type
comparison. */
#define COMPARE_STRICT 0 /* Just check if the types are the
same. */
#define COMPARE_BASE 1 /* Check to see if the second type is
derived from the first. */
#define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in
reverse. */
#define COMPARE_REDECLARATION 4 /* The comparison is being done when
another declaration of an existing
entity is seen. */
#define COMPARE_STRUCTURAL 8 /* The comparison is intended to be
structural. The actual comparison
will be identical to
COMPARE_STRICT. */
/* Used with push_overloaded_decl. */
#define PUSH_GLOBAL 0 /* Push the DECL into namespace scope,
regardless of the current scope. */
#define PUSH_LOCAL 1 /* Push the DECL into the current
scope. */
#define PUSH_USING 2 /* We are pushing this DECL as the
result of a using declaration. */
/* Used with start function. */
#define SF_DEFAULT 0 /* No flags. */
#define SF_PRE_PARSED 1 /* The function declaration has
already been parsed. */
#define SF_INCLASS_INLINE 2 /* The function is an inline, defined
in the class body. */
/* Used with start_decl's initialized parameter. */
#define SD_UNINITIALIZED 0
#define SD_INITIALIZED 1
#define SD_DEFAULTED 2
#define SD_DELETED 3
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2
is derived from TYPE1, or if TYPE2 is a pointer (reference) to a
class derived from the type pointed to (referred to) by TYPE1. */
#define same_or_base_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_BASE)
/* These macros are used to access a TEMPLATE_PARM_INDEX. */
#define TEMPLATE_PARM_INDEX_CAST(NODE) \
((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)
#define TEMPLATE_PARM_PARAMETER_PACK(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE)))
/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes. */
#define TEMPLATE_TYPE_PARM_INDEX(NODE) \
(TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, \
TEMPLATE_TEMPLATE_PARM, \
BOUND_TEMPLATE_TEMPLATE_PARM)))
#define TEMPLATE_TYPE_IDX(NODE) \
(TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_LEVEL(NODE) \
(TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
(TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_DECL(NODE) \
(TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \
(TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE)))
/* These constants can be used as bit flags in the process of tree formatting.
TFF_PLAIN_IDENTIFIER: unqualified part of a name.
TFF_SCOPE: include the class and namespace scope of the name.
TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name.
TFF_DECL_SPECIFIERS: print decl-specifiers.
TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with
a class-key (resp. `enum').
TFF_RETURN_TYPE: include function return type.
TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values.
TFF_EXCEPTION_SPECIFICATION: show function exception specification.
TFF_TEMPLATE_HEADER: show the template<...> header in a
template-declaration.
TFF_TEMPLATE_NAME: show only template-name.
TFF_EXPR_IN_PARENS: parenthesize expressions.
TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments.
TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the
top-level entity.
TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments
identical to their defaults. */
#define TFF_PLAIN_IDENTIFIER (0)
#define TFF_SCOPE (1)
#define TFF_CHASE_TYPEDEF (1 << 1)
#define TFF_DECL_SPECIFIERS (1 << 2)
#define TFF_CLASS_KEY_OR_ENUM (1 << 3)
#define TFF_RETURN_TYPE (1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5)
#define TFF_EXCEPTION_SPECIFICATION (1 << 6)
#define TFF_TEMPLATE_HEADER (1 << 7)
#define TFF_TEMPLATE_NAME (1 << 8)
#define TFF_EXPR_IN_PARENS (1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS (1 << 10)
#define TFF_UNQUALIFIED_NAME (1 << 11)
#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS (1 << 12)
/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM
node. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \
((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
? TYPE_TI_TEMPLATE (NODE) \
: TYPE_NAME (NODE))
/* in lex.c */
extern void init_reswords (void);
/* Per-operator record (identifier, printable name, mangled name, and
   arity); entries of the operator_name_info tables declared below,
   which are indexed by tree code.  */
typedef struct GTY(()) operator_name_info_t {
/* The IDENTIFIER_NODE for the operator. */
tree identifier;
/* The name of the operator. */
const char *name;
/* The mangled name of the operator. */
const char *mangled_name;
/* The arity of the operator. */
int arity;
} operator_name_info_t;
/* A mapping from tree codes to operator name information. */
extern GTY(()) operator_name_info_t operator_name_info
[(int) MAX_TREE_CODES];
/* Similar, but for assignment operators. */
extern GTY(()) operator_name_info_t assignment_operator_name_info
[(int) MAX_TREE_CODES];
/* A type-qualifier, or bitmask thereof, using the TYPE_QUAL
constants. */
typedef int cp_cv_quals;
/* Non-static member functions have an optional virt-specifier-seq.
There is a VIRT_SPEC value for each virt-specifier.
They can be combined by bitwise-or to form the complete set of
virt-specifiers for a member function. */
enum virt_specifier
{
VIRT_SPEC_UNSPECIFIED = 0x0,   /* No virt-specifier seen.  */
VIRT_SPEC_FINAL = 0x1,         /* `final' was specified.  */
VIRT_SPEC_OVERRIDE = 0x2       /* `override' was specified.  */
};
/* A type-qualifier, or bitmask thereof, using the VIRT_SPEC
constants. */
typedef int cp_virt_specifiers;
/* Wherever there is a function-cv-qual, there could also be a ref-qualifier:
[dcl.fct]
The return type, the parameter-type-list, the ref-qualifier, and
the cv-qualifier-seq, but not the default arguments or the exception
specification, are part of the function type.
REF_QUAL_NONE Ordinary member function with no ref-qualifier
REF_QUAL_LVALUE Member function with the &-ref-qualifier
REF_QUAL_RVALUE Member function with the &&-ref-qualifier */
enum cp_ref_qualifier {
REF_QUAL_NONE = 0,     /* No ref-qualifier.  */
REF_QUAL_LVALUE = 1,   /* The `&' ref-qualifier.  */
REF_QUAL_RVALUE = 2    /* The `&&' ref-qualifier.  */
};
/* A storage class, as written in the source.  */
typedef enum cp_storage_class {
/* sc_none must be zero so that zeroing a cp_decl_specifier_seq
sets the storage_class field to sc_none. */
sc_none = 0,
sc_auto,       /* `auto' */
sc_register,   /* `register' */
sc_static,     /* `static' */
sc_extern,     /* `extern' */
sc_mutable     /* `mutable' */
} cp_storage_class;
/* An individual decl-specifier. This is used to index the array of
locations for the declspecs in struct cp_decl_specifier_seq
below. */
typedef enum cp_decl_spec {
ds_first,                /* Sentinel: same value as the first real
                            specifier (ds_signed).  */
ds_signed = ds_first,    /* `signed' */
ds_unsigned,             /* `unsigned' */
ds_short,                /* `short' */
ds_long,                 /* `long' */
ds_const,                /* `const' */
ds_volatile,             /* `volatile' */
ds_restrict,             /* `__restrict' -- TODO confirm spelling used */
ds_inline,               /* `inline' */
ds_virtual,              /* `virtual' */
ds_explicit,             /* `explicit' */
ds_friend,               /* `friend' */
ds_typedef,              /* `typedef' */
ds_alias,                /* alias-declaration `using' -- verify at users */
ds_constexpr,            /* `constexpr' */
ds_complex,              /* `_Complex' -- verify at users */
ds_thread,               /* `__thread' or `thread_local' (see
                            gnu_thread_keyword_p below) */
ds_type_spec,            /* a type-specifier */
ds_redefined_builtin_type_spec,  /* redefinition of a built-in type */
ds_attribute,            /* GNU `__attribute__' */
ds_std_attribute,        /* C++11 `[[...]]' attribute */
ds_storage_class,        /* a storage-class-specifier */
ds_long_long,            /* the second `long' of `long long' */
ds_last /* This enumerator must always be the last one. */
} cp_decl_spec;
/* A decl-specifier-seq. */
typedef struct cp_decl_specifier_seq {
/* An array of locations for the declaration specifiers, indexed by
enum cp_decl_spec. */
source_location locations[ds_last];
/* The primary type, if any, given by the decl-specifier-seq.
Modifiers, like "short", "const", and "unsigned" are not
reflected here. This field will be a TYPE, unless a typedef-name
was used, in which case it will be a TYPE_DECL. */
tree type;
/* The attributes, if any, provided with the specifier sequence. */
tree attributes;
/* The C++11 attributes that follow the type specifier. */
tree std_attributes;
/* If non-NULL, a built-in type that the user attempted to redefine
to some other type. */
tree redefined_builtin_type;
/* The storage class specified -- or sc_none if no storage class was
explicitly specified. */
cp_storage_class storage_class;
/* True iff TYPE_SPEC defines a class or enum. */
BOOL_BITFIELD type_definition_p : 1;
/* True iff multiple types were (erroneously) specified for this
decl-specifier-seq. */
BOOL_BITFIELD multiple_types_p : 1;
/* True iff multiple storage classes were (erroneously) specified
for this decl-specifier-seq or a combination of a storage class
with a typedef specifier. */
BOOL_BITFIELD conflicting_specifiers_p : 1;
/* True iff at least one decl-specifier was found. */
BOOL_BITFIELD any_specifiers_p : 1;
/* True iff at least one type-specifier was found. */
BOOL_BITFIELD any_type_specifiers_p : 1;
/* True iff "int" was explicitly provided. */
BOOL_BITFIELD explicit_int_p : 1;
/* True iff "__int128" was explicitly provided. */
BOOL_BITFIELD explicit_int128_p : 1;
/* True iff "char" was explicitly provided. */
BOOL_BITFIELD explicit_char_p : 1;
/* True iff ds_thread is set for __thread, not thread_local. */
BOOL_BITFIELD gnu_thread_keyword_p : 1;
} cp_decl_specifier_seq;
/* The various kinds of declarators. */
typedef enum cp_declarator_kind {
cdk_id,          /* A (possibly qualified) name.  */
cdk_function,    /* A function declarator: D(parameters).  */
cdk_array,       /* An array declarator: D[bounds].  */
cdk_pointer,     /* A pointer declarator: *D.  */
cdk_reference,   /* A reference declarator: &D or &&D.  */
cdk_ptrmem,      /* A pointer-to-member declarator: C::*D.  */
cdk_error        /* An erroneous declarator (error recovery).  */
} cp_declarator_kind;
/* A declarator. */
typedef struct cp_declarator cp_declarator;
typedef struct cp_parameter_declarator cp_parameter_declarator;
/* A parameter, before it has been semantically analyzed. */
struct cp_parameter_declarator {
/* The next parameter, or NULL if none. */
cp_parameter_declarator *next;
/* The decl-specifiers-seq for the parameter. */
cp_decl_specifier_seq decl_specifiers;
/* The declarator for the parameter. */
cp_declarator *declarator;
/* The default-argument expression, or NULL_TREE, if none. */
tree default_argument;
/* True iff this is the first parameter in the list and the
parameter sequence ends with an ellipsis. */
bool ellipsis_p;
};
/* A declarator. */
struct cp_declarator {
/* The kind of declarator. */
ENUM_BITFIELD (cp_declarator_kind) kind : 4;
/* Whether we parsed an ellipsis (`...') just before the declarator,
to indicate this is a parameter pack. */
BOOL_BITFIELD parameter_pack_p : 1;
location_t id_loc; /* Currently only set for cdk_id and cdk_function. */
/* GNU Attributes that apply to this declarator. If the declarator
is a pointer or a reference, these attributes apply to the type
pointed to. */
tree attributes;
/* Standard C++11 attributes that apply to this declarator. If the
declarator is a pointer or a reference, these attributes apply
to the pointer, rather than to the type pointed to. */
tree std_attributes;
/* For all but cdk_id and cdk_error, the contained declarator. For
cdk_id and cdk_error, guaranteed to be NULL. */
cp_declarator *declarator;
/* Kind-specific data; the valid member is selected by KIND above. */
union {
/* For identifiers. */
struct {
/* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
*_TYPE) for this identifier. */
tree qualifying_scope;
/* The unqualified name of the entity -- an IDENTIFIER_NODE,
BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */
tree unqualified_name;
/* If this is the name of a function, what kind of special
function (if any). */
special_function_kind sfk;
} id;
/* For functions. */
struct {
/* The parameters to the function as a TREE_LIST of decl/default. */
tree parameters;
/* The cv-qualifiers for the function. */
cp_cv_quals qualifiers;
/* The virt-specifiers for the function. */
cp_virt_specifiers virt_specifiers;
/* The ref-qualifier for the function. */
cp_ref_qualifier ref_qualifier;
/* The exception-specification for the function. */
tree exception_specification;
/* The late-specified return type, if any. */
tree late_return_type;
} function;
/* For arrays. */
struct {
/* The bounds to the array. */
tree bounds;
} array;
/* For cdk_pointer and cdk_ptrmem. */
struct {
/* The cv-qualifiers for the pointer. */
cp_cv_quals qualifiers;
/* For cdk_ptrmem, the class type containing the member. */
tree class_type;
} pointer;
/* For cdk_reference. */
struct {
/* The cv-qualifiers for the reference. These qualifiers are
only used to diagnose ill-formed code. */
cp_cv_quals qualifiers;
/* Whether this is an rvalue reference (`&&' rather than `&'). */
bool rvalue_ref;
} reference;
} u;
};
/* A level of template instantiation.  Levels are chained through NEXT
   (deepest level first), so the chain records the stack of nested
   instantiations. */
struct GTY((chain_next ("%h.next"))) tinst_level {
/* The immediately deeper level in the chain. */
struct tinst_level *next;
/* The original node. Can be either a DECL (for a function or static
data member) or a TYPE (for a class), depending on what we were
asked to instantiate. */
tree decl;
/* The location where the template is instantiated. */
location_t locus;
/* errorcount+sorrycount when we pushed this level. */
int errors;
/* True if the location is in a system header. */
bool in_system_header_p;
};
bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec);
/* Return the type of the `this' parameter of FNTYPE.  FNTYPE must be a
   METHOD_TYPE, whose first argument is the `this' parameter.  */
static inline tree
type_of_this_parm (const_tree fntype)
{
  gcc_assert (TREE_CODE (fntype) == METHOD_TYPE);

  function_args_iterator args;
  function_args_iter_init (&args, fntype);
  return function_args_iter_cond (&args);
}
/* Return the class of the `this' parameter of FNTYPE: strip one level
   from the `this' parameter's (pointer) type.  */
static inline tree
class_of_this_parm (const_tree fntype)
{
  tree this_parm_type = type_of_this_parm (fntype);
  return TREE_TYPE (this_parm_type);
}
/* A parameter list for a function that takes no parameters,
e.g. "int f(void)". */
extern cp_parameter_declarator *no_parameters;
/* True if we saw "#pragma GCC java_exceptions". */
extern bool pragma_java_exceptions;
/* in call.c */
extern bool check_dtor_name (tree, tree);
extern tree build_conditional_expr (tree, tree, tree,
tsubst_flags_t);
extern tree build_addr_func (tree, tsubst_flags_t);
extern void set_flags_from_callee (tree);
extern tree build_call_a (tree, int, tree*);
extern tree build_call_n (tree, int, ...);
extern bool null_ptr_cst_p (tree);
extern bool null_member_pointer_value_p (tree);
extern bool sufficient_parms_p (const_tree);
extern tree type_decays_to (tree);
extern tree build_user_type_conversion (tree, tree, int,
tsubst_flags_t);
extern tree build_new_function_call (tree, vec<tree, va_gc> **, bool,
tsubst_flags_t);
extern tree build_operator_new_call (tree, vec<tree, va_gc> **, tree *,
tree *, tree, tree *,
tsubst_flags_t);
extern tree build_new_method_call (tree, tree, vec<tree, va_gc> **,
tree, int, tree *,
tsubst_flags_t);
extern tree build_special_member_call (tree, tree, vec<tree, va_gc> **,
tree, int, tsubst_flags_t);
extern tree build_new_op (location_t, enum tree_code,
int, tree, tree, tree, tree *,
tsubst_flags_t);
extern tree build_op_call (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_op_delete_call (enum tree_code, tree, tree,
bool, tree, tree,
tsubst_flags_t);
extern bool can_convert (tree, tree, tsubst_flags_t);
extern bool can_convert_arg (tree, tree, tree, int,
tsubst_flags_t);
extern bool can_convert_arg_bad (tree, tree, tree, int,
tsubst_flags_t);
extern bool enforce_access (tree, tree, tree,
tsubst_flags_t);
extern void push_defarg_context (tree);
extern void pop_defarg_context (void);
extern tree convert_default_arg (tree, tree, tree, int,
tsubst_flags_t);
extern tree convert_arg_to_ellipsis (tree, tsubst_flags_t);
extern tree build_x_va_arg (source_location, tree, tree);
extern tree cxx_type_promotes_to (tree);
extern tree type_passed_as (tree);
extern tree convert_for_arg_passing (tree, tree, tsubst_flags_t);
extern bool is_properly_derived_from (tree, tree);
extern tree initialize_reference (tree, tree, int,
tsubst_flags_t);
extern tree extend_ref_init_temps (tree, tree, vec<tree, va_gc>**);
extern tree make_temporary_var_for_ref_to_temp (tree, tree);
extern bool type_has_extended_temps (tree);
extern tree strip_top_quals (tree);
extern bool reference_related_p (tree, tree);
extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t);
extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int);
extern tree build_integral_nontype_arg_conv (tree, tree, tsubst_flags_t);
extern tree perform_direct_initialization_if_possible (tree, tree, bool,
tsubst_flags_t);
extern tree in_charge_arg_for_name (tree);
extern tree build_cxx_call (tree, int, tree *,
tsubst_flags_t);
extern bool is_std_init_list (tree);
extern bool is_list_ctor (tree);
#ifdef ENABLE_CHECKING
extern void validate_conversion_obstack (void);
#endif /* ENABLE_CHECKING */
extern void mark_versions_used (tree);
extern tree get_function_version_dispatcher (tree);
/* in class.c */
extern tree build_vfield_ref (tree, tree);
extern tree build_base_path (enum tree_code, tree,
tree, int, tsubst_flags_t);
extern tree convert_to_base (tree, tree, bool, bool,
tsubst_flags_t);
extern tree convert_to_base_statically (tree, tree);
extern tree build_vtbl_ref (tree, tree);
extern tree build_vfn_ref (tree, tree);
extern tree get_vtable_decl (tree, int);
extern void resort_type_method_vec (void *, void *,
gt_pointer_operator, void *);
extern bool add_method (tree, tree, tree);
extern bool currently_open_class (tree);
extern tree currently_open_derived_class (tree);
extern tree current_nonlambda_class_type (void);
extern tree finish_struct (tree, tree);
extern void finish_struct_1 (tree);
extern int resolves_to_fixed_type_p (tree, int *);
extern void init_class_processing (void);
extern int is_empty_class (tree);
extern bool is_really_empty_class (tree);
extern void pushclass (tree);
extern void popclass (void);
extern void push_nested_class (tree);
extern void pop_nested_class (void);
extern int current_lang_depth (void);
extern void push_lang_context (tree);
extern void pop_lang_context (void);
extern tree instantiate_type (tree, tree, tsubst_flags_t);
extern void print_class_statistics (void);
extern void build_self_reference (void);
extern int same_signature_p (const_tree, const_tree);
extern void maybe_add_class_template_decl_list (tree, tree, int);
extern void unreverse_member_declarations (tree);
extern void invalidate_class_lookup_cache (void);
extern void maybe_note_name_used_in_class (tree, tree);
extern void note_name_declared_in_class (tree, tree);
extern tree get_vtbl_decl_for_binfo (tree);
extern void debug_class (tree);
extern void debug_thunks (tree);
extern void set_linkage_according_to_type (tree, tree);
extern void determine_key_method (tree);
extern void check_for_override (tree, tree);
extern void push_class_stack (void);
extern void pop_class_stack (void);
extern bool type_has_user_nondefault_constructor (tree);
extern tree in_class_defaulted_default_constructor (tree);
extern bool user_provided_p (tree);
extern bool type_has_user_provided_constructor (tree);
extern bool type_has_user_provided_default_constructor (tree);
extern bool vbase_has_user_provided_move_assign (tree);
extern tree default_init_uninitialized_part (tree);
extern bool trivial_default_constructor_is_constexpr (tree);
extern bool type_has_constexpr_default_constructor (tree);
extern bool type_has_virtual_destructor (tree);
extern bool type_has_move_constructor (tree);
extern bool type_has_move_assign (tree);
extern bool type_has_user_declared_move_constructor (tree);
extern bool type_has_user_declared_move_assign(tree);
extern bool type_build_ctor_call (tree);
extern void explain_non_literal_class (tree);
extern void defaulted_late_check (tree);
extern bool defaultable_fn_check (tree);
extern void fixup_type_variants (tree);
extern void fixup_attribute_variants (tree);
extern tree* decl_cloned_function_p (const_tree, bool);
extern void clone_function_decl (tree, int);
extern void adjust_clone_args (tree);
extern void deduce_noexcept_on_destructor (tree);
extern void insert_late_enum_def_into_classtype_sorted_fields (tree, tree);
extern bool uniquely_derived_from_p (tree, tree);
extern bool publicly_uniquely_derived_p (tree, tree);
/* in cvt.c */
extern tree convert_to_reference (tree, tree, int, int, tree,
tsubst_flags_t);
extern tree convert_from_reference (tree);
extern tree force_rvalue (tree, tsubst_flags_t);
extern tree ocp_convert (tree, tree, int, int,
tsubst_flags_t);
extern tree cp_convert (tree, tree, tsubst_flags_t);
extern tree cp_convert_and_check (tree, tree, tsubst_flags_t);
extern tree cp_fold_convert (tree, tree);
extern tree convert_to_void (tree, impl_conv_void,
tsubst_flags_t);
extern tree convert_force (tree, tree, int,
tsubst_flags_t);
extern tree build_expr_type_conversion (int, tree, bool);
extern tree type_promotes_to (tree);
extern tree perform_qualification_conversions (tree, tree);
/* in name-lookup.c */
extern tree pushdecl (tree);
extern tree pushdecl_maybe_friend (tree, bool);
extern void maybe_push_cleanup_level (tree);
extern tree pushtag (tree, tree, tag_scope);
extern tree make_anon_name (void);
extern tree pushdecl_top_level_maybe_friend (tree, bool);
extern tree pushdecl_top_level_and_finish (tree, tree);
extern tree check_for_out_of_scope_variable (tree);
extern void print_other_binding_stack (cp_binding_level *);
extern tree maybe_push_decl (tree);
extern tree current_decl_namespace (void);
/* decl.c */
extern tree poplevel (int, int, int);
extern void cxx_init_decl_processing (void);
enum cp_tree_node_structure_enum cp_tree_node_structure
(union lang_tree_node *);
extern void finish_scope (void);
extern void push_switch (tree);
extern void pop_switch (void);
extern tree make_lambda_name (void);
extern int decls_match (tree, tree);
extern tree duplicate_decls (tree, tree, bool);
extern tree declare_local_label (tree);
extern tree define_label (location_t, tree);
extern void check_goto (tree);
extern bool check_omp_return (void);
extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t);
extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t);
extern tree build_library_fn_ptr (const char *, tree);
extern tree build_cp_library_fn_ptr (const char *, tree);
extern tree push_library_fn (tree, tree, tree);
extern tree push_void_library_fn (tree, tree);
extern tree push_throw_library_fn (tree, tree);
extern void warn_misplaced_attr_for_class_type (source_location location,
tree class_type);
extern tree check_tag_decl (cp_decl_specifier_seq *, bool);
extern tree shadow_tag (cp_decl_specifier_seq *);
extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool);
extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *);
extern void start_decl_1 (tree, bool);
extern bool check_array_initializer (tree, tree, tree);
extern void cp_finish_decl (tree, tree, bool, tree, int);
extern int cp_complete_array_type (tree *, tree, bool);
extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t);
extern tree build_ptrmemfunc_type (tree);
extern tree build_ptrmem_type (tree, tree);
/* the grokdeclarator prototype is in decl.h */
extern tree build_this_parm (tree, cp_cv_quals);
extern int copy_fn_p (const_tree);
extern bool move_fn_p (const_tree);
extern bool move_signature_fn_p (const_tree);
extern tree get_scope_of_declarator (const cp_declarator *);
extern void grok_special_member_properties (tree);
extern int grok_ctor_properties (const_tree, const_tree);
extern bool grok_op_properties (tree, bool);
extern tree xref_tag (enum tag_types, tree, tag_scope, bool);
extern tree xref_tag_from_type (tree, tree, tag_scope);
extern bool xref_basetypes (tree, tree);
extern tree start_enum (tree, tree, tree, bool, bool *);
extern void finish_enum_value_list (tree);
extern void finish_enum (tree);
extern void build_enumerator (tree, tree, tree, location_t);
extern tree lookup_enumerator (tree, tree);
extern void start_preparsed_function (tree, tree, int);
extern int start_function (cp_decl_specifier_seq *, const cp_declarator *, tree);
extern tree begin_function_body (void);
extern void finish_function_body (tree);
extern tree outer_curly_brace_block (tree);
extern tree finish_function (int);
extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree);
extern void maybe_register_incomplete_var (tree);
extern void maybe_commonize_var (tree);
extern void complete_vars (tree);
extern void finish_stmt (void);
extern tree static_fn_type (tree);
extern void revert_static_member_fn (tree);
extern void fixup_anonymous_aggr (tree);
extern tree compute_array_index_type (tree, tree, tsubst_flags_t);
extern tree check_default_argument (tree, tree, tsubst_flags_t);
typedef int (*walk_namespaces_fn) (tree, void *);
extern int walk_namespaces (walk_namespaces_fn,
void *);
extern int wrapup_globals_for_namespace (tree, void *);
extern tree create_implicit_typedef (tree, tree);
extern int local_variable_p (const_tree);
extern tree register_dtor_fn (tree);
extern tmpl_spec_kind current_tmpl_spec_kind (int);
extern tree cp_fname_init (const char *, tree *);
extern tree cxx_builtin_function (tree decl);
extern tree cxx_builtin_function_ext_scope (tree decl);
extern tree check_elaborated_type_specifier (enum tag_types, tree, bool);
extern void warn_extern_redeclared_static (tree, tree);
extern tree cxx_comdat_group (tree);
extern bool cp_missing_noreturn_ok_p (tree);
extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *);
extern tree check_var_type (tree, tree);
extern tree reshape_init (tree, tree, tsubst_flags_t);
extern tree next_initializable_field (tree);
extern bool defer_mark_used_calls;
extern GTY(()) vec<tree, va_gc> *deferred_mark_used_calls;
extern tree finish_case_label (location_t, tree, tree);
extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t);
/* in decl2.c */
extern bool check_java_method (tree);
extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier);
extern tree build_pointer_ptrmemfn_type (tree);
extern tree change_return_type (tree, tree);
extern void maybe_retrofit_in_chrg (tree);
extern void maybe_make_one_only (tree);
extern bool vague_linkage_p (tree);
extern void grokclassfn (tree, tree,
enum overload_flags);
extern tree grok_array_decl (location_t, tree, tree, bool);
extern tree delete_sanity (tree, tree, bool, int, tsubst_flags_t);
extern tree check_classfn (tree, tree, tree);
extern void check_member_template (tree);
extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, bool, tree, tree);
extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, tree);
extern tree cp_reconstruct_complex_type (tree, tree);
extern void cplus_decl_attributes (tree *, tree, int);
extern void finish_anon_union (tree);
extern void cp_write_global_declarations (void);
extern tree coerce_new_type (tree);
extern tree coerce_delete_type (tree);
extern void comdat_linkage (tree);
extern void determine_visibility (tree);
extern void constrain_class_visibility (tree);
extern void import_export_decl (tree);
extern tree build_cleanup (tree);
extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool decl_constant_var_p (tree);
extern bool decl_maybe_constant_var_p (tree);
extern void check_default_args (tree);
extern bool mark_used (tree);
extern void finish_static_data_member_decl (tree, tree, bool, tree, int);
extern tree cp_build_parm_decl (tree, tree);
extern tree get_guard (tree);
extern tree get_guard_cond (tree);
extern tree set_guard (tree);
extern tree get_tls_wrapper_fn (tree);
extern void mark_needed (tree);
extern bool decl_needed_p (tree);
extern void note_vague_linkage_fn (tree);
extern tree build_artificial_parm (tree, tree);
extern bool possibly_inlined_p (tree);
extern int parm_index (tree);
/* in error.c */
extern void init_error (void);
extern const char *type_as_string (tree, int);
extern const char *type_as_string_translate (tree, int);
extern const char *decl_as_string (tree, int);
extern const char *decl_as_string_translate (tree, int);
extern const char *decl_as_dwarf_string (tree, int);
extern const char *expr_as_string (tree, int);
extern const char *lang_decl_name (tree, int, bool);
extern const char *lang_decl_dwarf_name (tree, int, bool);
extern const char *language_to_string (enum languages);
extern const char *class_key_or_enum_as_string (tree);
extern void print_instantiation_context (void);
extern void maybe_warn_variadic_templates (void);
extern void maybe_warn_cpp0x (cpp0x_warn_str str);
extern bool pedwarn_cxx98 (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern location_t location_of (tree);
extern void qualified_name_lookup_error (tree, tree, tree,
location_t);
/* in except.c */
extern void init_exception_processing (void);
extern tree expand_start_catch_block (tree);
extern void expand_end_catch_block (void);
extern tree build_exc_ptr (void);
extern tree build_throw (tree);
extern int nothrow_libfn_p (const_tree);
extern void check_handlers (tree);
extern tree finish_noexcept_expr (tree, tsubst_flags_t);
extern bool expr_noexcept_p (tree, tsubst_flags_t);
extern void perform_deferred_noexcept_checks (void);
extern bool nothrow_spec_p (const_tree);
extern bool type_noexcept_p (const_tree);
extern bool type_throw_all_p (const_tree);
extern tree build_noexcept_spec (tree, int);
extern void choose_personality_routine (enum languages);
extern tree build_must_not_throw_expr (tree,tree);
extern tree eh_type_info (tree);
extern tree begin_eh_spec_block (void);
extern void finish_eh_spec_block (tree, tree);
extern tree build_eh_type_type (tree);
extern tree cp_protect_cleanup_actions (void);
/* in expr.c */
extern tree cplus_expand_constant (tree);
extern tree mark_rvalue_use (tree);
extern tree mark_lvalue_use (tree);
extern tree mark_type_use (tree);
extern void mark_exp_read (tree);
/* in friend.c */
extern int is_friend (tree, tree);
extern void make_friend_class (tree, tree, bool);
extern void add_friend (tree, tree, bool);
extern tree do_friend (tree, tree, tree, tree, enum overload_flags, bool);
/* in init.c */
extern tree expand_member_init (tree);
extern void emit_mem_initializers (tree);
extern tree build_aggr_init (tree, tree, int,
tsubst_flags_t);
extern int is_class_type (tree, int);
extern tree get_type_value (tree);
extern tree build_zero_init (tree, tree, bool);
extern tree build_value_init (tree, tsubst_flags_t);
extern tree build_value_init_noctor (tree, tsubst_flags_t);
extern tree build_offset_ref (tree, tree, bool,
tsubst_flags_t);
extern tree build_new (vec<tree, va_gc> **, tree, tree,
vec<tree, va_gc> **, int,
tsubst_flags_t);
extern tree get_temp_regvar (tree, tree);
extern tree build_vec_init (tree, tree, tree, bool, int,
tsubst_flags_t);
extern tree build_delete (tree, tree,
special_function_kind,
int, int, tsubst_flags_t);
extern void push_base_cleanups (void);
extern tree build_vec_delete (tree, tree,
special_function_kind, int,
tsubst_flags_t);
extern tree create_temporary_var (tree);
extern void initialize_vtbl_ptrs (tree);
extern tree build_java_class_ref (tree);
extern tree integral_constant_value (tree);
extern tree decl_constant_value_safe (tree);
extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool);
/* in lex.c */
extern void cxx_dup_lang_specific_decl (tree);
extern void yyungetc (int, int);
extern tree unqualified_name_lookup_error (tree);
extern tree unqualified_fn_lookup_error (tree);
extern tree build_lang_decl (enum tree_code, tree, tree);
extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree);
extern void retrofit_lang_decl (tree);
extern tree copy_decl (tree);
extern tree copy_type (tree);
extern tree cxx_make_type (enum tree_code);
extern tree make_class_type (enum tree_code);
extern bool cxx_init (void);
extern void cxx_finish (void);
extern bool in_main_input_context (void);
/* in method.c */
extern void init_method (void);
extern tree make_thunk (tree, bool, tree, tree);
extern void finish_thunk (tree);
extern void use_thunk (tree, bool);
extern bool trivial_fn_p (tree);
extern bool maybe_explain_implicit_delete (tree);
extern void explain_implicit_non_constexpr (tree);
extern void deduce_inheriting_ctor (tree);
extern void synthesize_method (tree);
extern tree lazily_declare_fn (special_function_kind,
tree);
extern tree skip_artificial_parms_for (const_tree, tree);
extern int num_artificial_parms_for (const_tree);
extern tree make_alias_for (tree, tree);
extern tree get_copy_ctor (tree, tsubst_flags_t);
extern tree get_copy_assign (tree);
extern tree get_default_ctor (tree);
extern tree get_dtor (tree, tsubst_flags_t);
extern tree locate_ctor (tree);
extern tree implicitly_declare_fn (special_function_kind, tree,
bool, tree, tree);
/* in optimize.c */
extern bool maybe_clone_body (tree);
/* in pt.c */
extern bool check_template_shadow (tree);
extern tree get_innermost_template_args (tree, int);
extern void maybe_begin_member_template_processing (tree);
extern void maybe_end_member_template_processing (void);
extern tree finish_member_template_decl (tree);
extern void begin_template_parm_list (void);
extern bool begin_specialization (void);
extern void reset_specialization (void);
extern void end_specialization (void);
extern void begin_explicit_instantiation (void);
extern void end_explicit_instantiation (void);
extern tree check_explicit_specialization (tree, tree, int, int);
extern int num_template_headers_for_class (tree);
extern void check_template_variable (tree);
extern tree make_auto (void);
extern tree do_auto_deduction (tree, tree, tree);
extern tree type_uses_auto (tree);
extern void append_type_to_template_for_access_check (tree, tree, tree,
location_t);
extern tree splice_late_return_type (tree, tree);
extern bool is_auto (const_tree);
extern tree process_template_parm (tree, location_t, tree,
bool, bool);
extern tree end_template_parm_list (tree);
extern void end_template_decl (void);
extern tree maybe_update_decl_type (tree, tree);
extern bool check_default_tmpl_args (tree, tree, bool, bool, int);
extern tree push_template_decl (tree);
extern tree push_template_decl_real (tree, bool);
extern tree add_inherited_template_parms (tree, tree);
extern bool redeclare_class_template (tree, tree);
extern tree lookup_template_class (tree, tree, tree, tree,
int, tsubst_flags_t);
extern tree lookup_template_function (tree, tree);
extern int uses_template_parms (tree);
extern int uses_template_parms_level (tree, int);
extern bool in_template_function (void);
extern tree instantiate_class_template (tree);
extern tree instantiate_template (tree, tree, tsubst_flags_t);
extern tree fn_type_unification (tree, tree, tree,
const tree *, unsigned int,
tree, unification_kind_t, int,
bool);
extern void mark_decl_instantiated (tree, int);
extern int more_specialized_fn (tree, tree, int);
extern void do_decl_instantiation (tree, tree);
extern void do_type_instantiation (tree, tree, tsubst_flags_t);
extern bool always_instantiate_p (tree);
extern void maybe_instantiate_noexcept (tree);
extern tree instantiate_decl (tree, int, bool);
extern int comp_template_parms (const_tree, const_tree);
extern bool uses_parameter_packs (tree);
extern bool template_parameter_pack_p (const_tree);
extern bool function_parameter_pack_p (const_tree);
extern bool function_parameter_expanded_from_pack_p (tree, tree);
extern tree make_pack_expansion (tree);
extern bool check_for_bare_parameter_packs (tree);
extern tree build_template_info (tree, tree);
extern tree get_template_info (const_tree);
extern vec<qualified_typedef_usage_t, va_gc> *get_types_needing_access_check (tree);
extern int template_class_depth (tree);
extern int is_specialization_of (tree, tree);
extern bool is_specialization_of_friend (tree, tree);
extern tree get_pattern_parm (tree, tree);
extern int comp_template_args (tree, tree);
extern tree maybe_process_partial_specialization (tree);
extern tree most_specialized_instantiation (tree);
extern void print_candidates (tree);
extern void instantiate_pending_templates (int);
extern tree tsubst_default_argument (tree, tree, tree,
tsubst_flags_t);
extern tree tsubst (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t,
tree, bool, bool);
extern tree most_general_template (tree);
extern tree get_mostly_instantiated_function_type (tree);
extern int problematic_instantiation_changed (void);
extern void record_last_problematic_instantiation (void);
extern struct tinst_level *current_instantiation(void);
extern tree maybe_get_template_decl_from_type_decl (tree);
extern int processing_template_parmlist;
extern bool dependent_type_p (tree);
extern bool dependent_scope_p (tree);
extern bool any_dependent_template_arguments_p (const_tree);
extern bool dependent_template_p (tree);
extern bool dependent_template_id_p (tree, tree);
extern bool type_dependent_expression_p (tree);
extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *);
extern bool any_type_dependent_elements_p (const_tree);
extern bool type_dependent_expression_p_push (tree);
extern bool value_dependent_expression_p (tree);
extern bool instantiation_dependent_expression_p (tree);
extern bool any_value_dependent_elements_p (const_tree);
extern bool dependent_omp_for_p (tree, tree, tree, tree);
extern tree resolve_typename_type (tree, bool);
extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
extern void make_args_non_dependent (vec<tree, va_gc> *);
extern bool reregister_specialization (tree, tree, tree);
extern tree fold_non_dependent_expr (tree);
extern tree fold_non_dependent_expr_sfinae (tree, tsubst_flags_t);
extern bool alias_type_or_template_p (tree);
extern bool alias_template_specialization_p (const_tree);
extern bool explicit_class_specialization_p (tree);
extern int push_tinst_level (tree);
extern void pop_tinst_level (void);
extern struct tinst_level *outermost_tinst_level(void);
extern void init_template_processing (void);
extern void print_template_statistics (void);
bool template_template_parameter_p (const_tree);
bool template_type_parameter_p (const_tree);
extern bool primary_template_instantiation_p (const_tree);
extern tree get_primary_template_innermost_parameters (const_tree);
extern tree get_template_parms_at_level (tree, int);
extern tree get_template_innermost_arguments (const_tree);
extern tree get_template_argument_pack_elems (const_tree);
extern tree get_function_template_decl (const_tree);
extern tree resolve_nondeduced_context (tree);
extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val);
/* in repo.c */
extern void init_repo (void);
extern int repo_emit_p (tree);
extern bool repo_export_class_p (const_tree);
extern void finish_repo (void);
/* in rtti.c */
/* A vector of all tinfo decls that haven't been emitted yet. */
extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls;
extern void init_rtti_processing (void);
extern tree build_typeid (tree, tsubst_flags_t);
extern tree get_tinfo_decl (tree);
extern tree get_typeid (tree, tsubst_flags_t);
extern tree build_headof (tree);
extern tree build_dynamic_cast (tree, tree, tsubst_flags_t);
extern void emit_support_tinfos (void);
extern bool emit_tinfo_decl (tree);
/* in search.c */
extern bool accessible_base_p (tree, tree, bool);
extern tree lookup_base (tree, tree, base_access,
base_kind *, tsubst_flags_t);
extern tree dcast_base_hint (tree, tree);
extern int accessible_p (tree, tree, bool);
extern int accessible_in_template_p (tree, tree);
extern tree lookup_field_1 (tree, tree, bool);
extern tree lookup_field (tree, tree, int, bool);
extern int lookup_fnfields_1 (tree, tree);
extern tree lookup_fnfields_slot (tree, tree);
extern tree lookup_fnfields_slot_nolazy (tree, tree);
extern int class_method_index_for_fn (tree, tree);
extern tree lookup_fnfields (tree, tree, int);
extern tree lookup_member (tree, tree, int, bool,
tsubst_flags_t);
extern int look_for_overrides (tree, tree);
extern void get_pure_virtuals (tree);
extern void maybe_suppress_debug_info (tree);
extern void note_debug_info_needed (tree);
extern void print_search_statistics (void);
extern void reinit_search_statistics (void);
extern tree current_scope (void);
extern int at_function_scope_p (void);
extern bool at_class_scope_p (void);
extern bool at_namespace_scope_p (void);
extern tree context_for_name_lookup (tree);
extern tree lookup_conversions (tree);
extern tree binfo_from_vbase (tree);
extern tree binfo_for_vbase (tree, tree);
extern tree look_for_overrides_here (tree, tree);
/* Sentinel value (not a valid tree pointer) for use by dfs_walk_all /
   dfs_walk_once callbacks; the name suggests returning it makes the
   walk skip the bases of the current binfo -- confirm against the
   walker implementations in search.c.  */
#define dfs_skip_bases ((tree)1)
extern tree dfs_walk_all (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree dfs_walk_once (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree binfo_via_virtual (tree, tree);
extern tree build_baselink (tree, tree, tree, tree);
extern tree adjust_result_of_qualified_name_lookup
(tree, tree, tree);
extern tree copied_binfo (tree, tree);
extern tree original_binfo (tree, tree);
extern int shared_member_p (tree);
/* The representation of a single deferred access check.  These records
   are queued while access checking is suspended (see
   push_deferring_access_checks and perform_deferred_access_checks,
   declared below) and replayed later.  GTY(()) marks the struct for
   the garbage collector since it holds live trees.  */
typedef struct GTY(()) deferred_access_check {
/* The base class in which the declaration is referenced. */
tree binfo;
/* The declaration whose access must be checked. */
tree decl;
/* The declaration that should be used in the error message. */
tree diag_decl;
/* The location of this access. */
location_t loc;
} deferred_access_check;
/* in semantics.c */
extern void push_deferring_access_checks (deferring_kind);
extern void resume_deferring_access_checks (void);
extern void stop_deferring_access_checks (void);
extern void pop_deferring_access_checks (void);
extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void);
extern void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> *);
extern void pop_to_parent_deferring_access_checks (void);
extern bool perform_access_checks (vec<deferred_access_check, va_gc> *,
tsubst_flags_t);
extern bool perform_deferred_access_checks (tsubst_flags_t);
extern bool perform_or_defer_access_check (tree, tree, tree,
tsubst_flags_t);
extern int stmts_are_full_exprs_p (void);
extern void init_cp_semantics (void);
extern tree do_poplevel (tree);
extern void add_decl_expr (tree);
extern tree maybe_cleanup_point_expr_void (tree);
extern tree finish_expr_stmt (tree);
extern tree begin_if_stmt (void);
extern void finish_if_stmt_cond (tree, tree);
extern tree finish_then_clause (tree);
extern void begin_else_clause (tree);
extern void finish_else_clause (tree);
extern void finish_if_stmt (tree);
extern tree begin_while_stmt (void);
extern void finish_while_stmt_cond (tree, tree);
extern void finish_while_stmt (tree);
extern tree begin_do_stmt (void);
extern void finish_do_body (tree);
extern void finish_do_stmt (tree, tree);
extern tree finish_return_stmt (tree);
extern tree begin_for_scope (tree *);
extern tree begin_for_stmt (tree, tree);
extern void finish_for_init_stmt (tree);
extern void finish_for_cond (tree, tree);
extern void finish_for_expr (tree, tree);
extern void finish_for_stmt (tree);
extern tree begin_range_for_stmt (tree, tree);
extern void finish_range_for_decl (tree, tree, tree);
extern void finish_range_for_stmt (tree);
extern tree finish_break_stmt (void);
extern tree finish_continue_stmt (void);
extern tree begin_switch_stmt (void);
extern void finish_switch_cond (tree, tree);
extern void finish_switch_stmt (tree);
extern tree finish_goto_stmt (tree);
extern tree begin_try_block (void);
extern void finish_try_block (tree);
extern void finish_handler_sequence (tree);
extern tree begin_function_try_block (tree *);
extern void finish_function_try_block (tree);
extern void finish_function_handler_sequence (tree, tree);
extern void finish_cleanup_try_block (tree);
extern tree begin_handler (void);
extern void finish_handler_parms (tree, tree);
extern void finish_handler (tree);
extern void finish_cleanup (tree, tree);
extern bool literal_type_p (tree);
extern tree register_constexpr_fundef (tree, tree);
extern bool check_constexpr_ctor_body (tree, tree);
extern tree ensure_literal_type_for_constexpr_object (tree);
extern bool potential_constant_expression (tree);
extern bool potential_rvalue_constant_expression (tree);
extern bool require_potential_constant_expression (tree);
extern bool require_potential_rvalue_constant_expression (tree);
extern tree cxx_constant_value (tree);
extern tree maybe_constant_value (tree);
extern tree maybe_constant_init (tree);
extern bool is_sub_constant_expr (tree);
extern bool reduced_constant_expression_p (tree);
extern void explain_invalid_constexpr_fn (tree);
extern vec<tree> cx_error_context (void);
/* Flag bits accepted by begin_compound_stmt (declared below).  The
   power-of-two values indicate they are meant to be OR'ed together;
   the names suggest: suppress creation of a new scope, mark the
   statement as a try block, and mark it as a function body -- confirm
   against begin_compound_stmt in semantics.c.  */
enum {
BCS_NO_SCOPE = 1,
BCS_TRY_BLOCK = 2,
BCS_FN_BODY = 4
};
extern tree begin_compound_stmt (unsigned int);
extern void finish_compound_stmt (tree);
extern tree finish_asm_stmt (int, tree, tree, tree, tree,
tree);
extern tree finish_label_stmt (tree);
extern void finish_label_decl (tree);
extern tree finish_parenthesized_expr (tree);
extern tree finish_non_static_data_member (tree, tree, tree);
extern tree begin_stmt_expr (void);
extern tree finish_stmt_expr_expr (tree, tree);
extern tree finish_stmt_expr (tree, bool);
extern tree stmt_expr_value_expr (tree);
bool empty_expr_stmt_p (tree);
extern tree perform_koenig_lookup (tree, vec<tree, va_gc> *, bool,
tsubst_flags_t);
extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool,
bool, tsubst_flags_t);
extern tree finish_increment_expr (tree, enum tree_code);
extern tree finish_this_expr (void);
extern tree finish_pseudo_destructor_expr (tree, tree, tree);
extern tree finish_unary_op_expr (location_t, enum tree_code, tree,
tsubst_flags_t);
extern tree finish_compound_literal (tree, tree, tsubst_flags_t);
extern tree finish_fname (tree);
extern void finish_translation_unit (void);
extern tree finish_template_type_parm (tree, tree);
extern tree finish_template_template_parm (tree, tree);
extern tree begin_class_definition (tree);
extern void finish_template_decl (tree);
extern tree finish_template_type (tree, tree, int);
extern tree finish_base_specifier (tree, tree, bool);
extern void finish_member_declaration (tree);
extern tree finish_id_expression (tree, tree, tree,
cp_id_kind *,
bool, bool, bool *,
bool, bool, bool, bool,
const char **,
location_t);
extern tree finish_typeof (tree);
extern tree finish_underlying_type (tree);
extern tree calculate_bases (tree);
extern tree finish_bases (tree, bool);
extern tree calculate_direct_bases (tree);
extern tree finish_offsetof (tree);
extern void finish_decl_cleanup (tree, tree);
extern void finish_eh_cleanup (tree);
extern void emit_associated_thunks (tree);
extern void finish_mem_initializers (tree);
extern tree check_template_template_default_arg (tree);
extern bool expand_or_defer_fn_1 (tree);
extern void expand_or_defer_fn (tree);
extern void add_typedef_to_current_template_for_access_check (tree, tree,
location_t);
extern void check_accessibility_of_qualified_id (tree, tree, tree);
extern tree finish_qualified_id_expr (tree, tree, bool, bool,
bool, bool, tsubst_flags_t);
extern void simplify_aggr_init_expr (tree *);
extern void finalize_nrv (tree *, tree, tree);
extern void note_decl_for_pch (tree);
extern tree finish_omp_clauses (tree);
extern void finish_omp_threadprivate (tree);
extern tree begin_omp_structured_block (void);
extern tree finish_omp_structured_block (tree);
extern tree begin_omp_parallel (void);
extern tree finish_omp_parallel (tree, tree);
extern tree begin_omp_task (void);
extern tree finish_omp_task (tree, tree);
extern tree finish_omp_for (location_t, tree, tree,
tree, tree, tree, tree, tree);
extern void finish_omp_atomic (enum tree_code, enum tree_code,
tree, tree, tree, tree, tree);
extern void finish_omp_barrier (void);
extern void finish_omp_flush (void);
extern void finish_omp_taskwait (void);
extern tree begin_transaction_stmt (location_t, tree *, int);
extern void finish_transaction_stmt (tree, tree, int, tree);
extern tree build_transaction_expr (location_t, tree, int, tree);
extern void finish_omp_taskyield (void);
extern bool cxx_omp_create_clause_info (tree, tree, bool, bool, bool);
extern tree baselink_for_fns (tree);
extern void finish_static_assert (tree, tree, location_t,
bool);
extern tree finish_decltype_type (tree, bool, tsubst_flags_t);
extern tree finish_trait_expr (enum cp_trait_kind, tree, tree);
extern tree build_lambda_expr (void);
extern tree build_lambda_object (tree);
extern tree begin_lambda_type (tree);
extern tree lambda_capture_field_type (tree);
extern tree lambda_return_type (tree);
extern tree lambda_proxy_type (tree);
extern tree lambda_function (tree);
extern void apply_deduced_return_type (tree, tree);
extern tree add_capture (tree, tree, tree, bool, bool);
extern tree add_default_capture (tree, tree, tree);
extern tree build_capture_proxy (tree);
extern void insert_capture_proxy (tree);
extern void insert_pending_capture_proxies (void);
extern bool is_capture_proxy (tree);
extern bool is_normal_capture_proxy (tree);
extern void register_capture_members (tree);
extern tree lambda_expr_this_capture (tree);
extern tree maybe_resolve_dummy (tree);
extern tree nonlambda_method_basetype (void);
extern void maybe_add_lambda_conv_op (tree);
extern bool is_lambda_ignored_entity (tree);
/* in tree.c */
extern int cp_tree_operand_length (const_tree);
void cp_free_lang_data (tree t);
extern tree force_target_expr (tree, tree, tsubst_flags_t);
extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t);
extern void lang_check_failed (const char *, int,
const char *) ATTRIBUTE_NORETURN;
extern tree stabilize_expr (tree, tree *);
extern void stabilize_call (tree, tree *);
extern bool stabilize_init (tree, tree *);
extern tree add_stmt_to_compound (tree, tree);
extern void init_tree (void);
extern bool pod_type_p (const_tree);
extern bool layout_pod_type_p (const_tree);
extern bool std_layout_type_p (const_tree);
extern bool trivial_type_p (const_tree);
extern bool trivially_copyable_p (const_tree);
extern bool scalarish_type_p (const_tree);
extern bool type_has_nontrivial_default_init (const_tree);
extern bool type_has_nontrivial_copy_init (const_tree);
extern bool class_tmpl_impl_spec_p (const_tree);
extern int zero_init_p (const_tree);
extern bool check_abi_tag_redeclaration (const_tree, const_tree, const_tree);
extern tree strip_typedefs (tree);
extern tree strip_typedefs_expr (tree);
extern tree copy_binfo (tree, tree, tree,
tree *, int);
extern int member_p (const_tree);
extern cp_lvalue_kind real_lvalue_p (const_tree);
extern cp_lvalue_kind lvalue_kind (const_tree);
extern bool lvalue_or_rvalue_with_address_p (const_tree);
extern bool xvalue_p (const_tree);
extern bool builtin_valid_in_constant_expr_p (const_tree);
extern tree build_min (enum tree_code, tree, ...);
extern tree build_min_nt_loc (location_t, enum tree_code,
...);
extern tree build_min_non_dep (enum tree_code, tree, ...);
extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *);
extern tree build_cplus_new (tree, tree, tsubst_flags_t);
extern tree build_aggr_init_expr (tree, tree);
extern tree get_target_expr (tree);
extern tree get_target_expr_sfinae (tree, tsubst_flags_t);
extern tree build_cplus_array_type (tree, tree);
extern tree build_array_of_n_type (tree, int);
extern tree build_array_copy (tree);
extern tree build_vec_init_expr (tree, tree, tsubst_flags_t);
extern void diagnose_non_constexpr_vec_init (tree);
extern tree hash_tree_cons (tree, tree, tree);
extern tree hash_tree_chain (tree, tree);
extern tree build_qualified_name (tree, tree, tree, bool);
extern tree build_ref_qualified_type (tree, cp_ref_qualifier);
extern int is_overloaded_fn (tree);
extern tree dependent_name (tree);
extern tree get_fns (tree);
extern tree get_first_fn (tree);
extern tree ovl_cons (tree, tree);
extern tree build_overload (tree, tree);
extern tree ovl_scope (tree);
extern bool non_static_member_function_p (tree);
extern const char *cxx_printable_name (tree, int);
extern const char *cxx_printable_name_translate (tree, int);
extern tree build_exception_variant (tree, tree);
extern tree bind_template_template_parm (tree, tree);
extern tree array_type_nelts_total (tree);
extern tree array_type_nelts_top (tree);
extern tree break_out_target_exprs (tree);
extern tree get_type_decl (tree);
extern tree decl_namespace_context (tree);
extern bool decl_anon_ns_mem_p (const_tree);
extern tree lvalue_type (tree);
extern tree error_type (tree);
extern int varargs_function_p (const_tree);
extern bool really_overloaded_fn (tree);
extern bool cp_tree_equal (tree, tree);
extern tree no_linkage_check (tree, bool);
extern void debug_binfo (tree);
extern tree build_dummy_object (tree);
extern tree maybe_dummy_object (tree, tree *);
extern int is_dummy_object (const_tree);
extern const struct attribute_spec cxx_attribute_table[];
extern tree make_ptrmem_cst (tree, tree);
extern tree cp_build_type_attribute_variant (tree, tree);
extern tree cp_build_reference_type (tree, bool);
extern tree move (tree);
extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t);
/* Convenience wrapper for cp_build_qualified_type_real (declared just
   above) that supplies the default complain argument,
   tf_warning_or_error.  */
#define cp_build_qualified_type(TYPE, QUALS) \
cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error)
extern bool cv_qualified_p (const_tree);
extern tree cv_unqualified (tree);
extern special_function_kind special_function_p (const_tree);
extern int count_trees (tree);
extern int char_type_p (tree);
extern void verify_stmt_tree (tree);
extern linkage_kind decl_linkage (tree);
extern duration_kind decl_storage_duration (tree);
extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn,
void*, struct pointer_set_t*);
/* Walk the tree rooted at *TP via walk_tree_1, substituting the
   C++-aware subtree walker cp_walk_subtrees (declared above) for the
   default language hook.  */
#define cp_walk_tree(tp,func,data,pset) \
walk_tree_1 (tp, func, data, pset, cp_walk_subtrees)
/* As cp_walk_tree, but delegates to walk_tree_without_duplicates_1,
   which (per its name) avoids revisiting shared subtrees.  */
#define cp_walk_tree_without_duplicates(tp,func,data) \
walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees)
extern tree fold_if_not_in_template (tree);
extern tree rvalue (tree);
extern tree convert_bitfield_to_declared_type (tree);
extern tree cp_save_expr (tree);
extern bool cast_valid_in_integral_constant_expression_p (tree);
extern bool cxx_type_hash_eq (const_tree, const_tree);
extern void cxx_print_statistics (void);
extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t);
/* in ptree.c */
extern void cxx_print_xnode (FILE *, tree, int);
extern void cxx_print_decl (FILE *, tree, int);
extern void cxx_print_type (FILE *, tree, int);
extern void cxx_print_identifier (FILE *, tree, int);
extern void cxx_print_error_function (diagnostic_context *,
const char *,
struct diagnostic_info *);
/* in typeck.c */
extern bool cxx_mark_addressable (tree);
extern int string_conv_p (const_tree, const_tree, int);
extern tree cp_truthvalue_conversion (tree);
extern tree condition_conversion (tree);
extern tree require_complete_type (tree);
extern tree require_complete_type_sfinae (tree, tsubst_flags_t);
extern tree complete_type (tree);
extern tree complete_type_or_else (tree, tree);
extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t);
extern int type_unknown_p (const_tree);
enum { ce_derived, ce_normal, ce_exact };
extern bool comp_except_specs (const_tree, const_tree, int);
extern bool comptypes (tree, tree, int);
extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree);
extern bool compparms (const_tree, const_tree);
extern int comp_cv_qualification (const_tree, const_tree);
extern int comp_cv_qual_signature (tree, tree);
extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code, bool);
extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool);
extern tree cxx_alignas_expr (tree);
extern tree cxx_sizeof_nowarn (tree);
extern tree is_bitfield_expr_with_lowered_type (const_tree);
extern tree unlowered_expr_type (const_tree);
extern tree decay_conversion (tree, tsubst_flags_t);
extern tree build_class_member_access_expr (tree, tree, tree, bool,
tsubst_flags_t);
extern tree finish_class_member_access_expr (tree, tree, bool,
tsubst_flags_t);
extern tree build_x_indirect_ref (location_t, tree,
ref_operator, tsubst_flags_t);
extern tree cp_build_indirect_ref (tree, ref_operator,
tsubst_flags_t);
extern tree build_array_ref (location_t, tree, tree);
extern tree cp_build_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t);
extern tree cp_build_function_call (tree, tree, tsubst_flags_t);
extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...)
ATTRIBUTE_SENTINEL;
extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_x_binary_op (location_t,
enum tree_code, tree,
enum tree_code, tree,
enum tree_code, tree *,
tsubst_flags_t);
extern tree build_x_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree build_x_unary_op (location_t,
enum tree_code, tree,
tsubst_flags_t);
extern tree cp_build_addr_expr (tree, tsubst_flags_t);
extern tree cp_build_addr_expr_strict (tree, tsubst_flags_t);
extern tree cp_build_unary_op (enum tree_code, tree, int,
tsubst_flags_t);
extern tree unary_complex_lvalue (enum tree_code, tree);
extern tree build_x_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_x_compound_expr_from_list (tree, expr_list_kind,
tsubst_flags_t);
extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *,
const char *, tsubst_flags_t);
extern tree build_x_compound_expr (location_t, tree, tree,
tsubst_flags_t);
extern tree build_compound_expr (location_t, tree, tree);
extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t);
extern tree build_static_cast (tree, tree, tsubst_flags_t);
extern tree build_reinterpret_cast (tree, tree, tsubst_flags_t);
extern tree build_const_cast (tree, tree, tsubst_flags_t);
extern tree build_c_cast (location_t, tree, tree);
extern tree cp_build_c_cast (tree, tree, tsubst_flags_t);
extern tree build_x_modify_expr (location_t, tree,
enum tree_code, tree,
tsubst_flags_t);
extern tree cp_build_modify_expr (tree, enum tree_code, tree,
tsubst_flags_t);
extern tree convert_for_initialization (tree, tree, tree, int,
impl_conv_rhs, tree, int,
tsubst_flags_t);
extern int comp_ptr_ttypes (tree, tree);
extern bool comp_ptr_ttypes_const (tree, tree);
extern bool error_type_p (const_tree);
extern int ptr_reasonably_similar (const_tree, const_tree);
extern tree build_ptrmemfunc (tree, tree, int, bool,
tsubst_flags_t);
extern int cp_type_quals (const_tree);
extern int type_memfn_quals (const_tree);
extern cp_ref_qualifier type_memfn_rqual (const_tree);
extern tree apply_memfn_quals (tree, cp_cv_quals, cp_ref_qualifier);
extern bool cp_has_mutable_p (const_tree);
extern bool at_least_as_qualified_p (const_tree, const_tree);
extern void cp_apply_type_quals_to_decl (int, tree);
extern tree build_ptrmemfunc1 (tree, tree, tree);
extern void expand_ptrmemfunc_cst (tree, tree *, tree *);
extern tree type_after_usual_arithmetic_conversions (tree, tree);
extern tree common_pointer_type (tree, tree);
extern tree composite_pointer_type (tree, tree, tree, tree,
composite_pointer_operation,
tsubst_flags_t);
extern tree merge_types (tree, tree);
extern tree strip_array_domain (tree);
extern tree check_return_expr (tree, bool *);
extern tree cp_build_binary_op (location_t,
enum tree_code, tree, tree,
tsubst_flags_t);
#define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, true)
extern tree build_ptrmemfunc_access_expr (tree, tree);
extern tree build_address (tree);
extern tree build_typed_address (tree, tree);
extern tree build_nop (tree, tree);
extern tree non_reference (tree);
extern tree lookup_anon_field (tree, tree);
extern bool invalid_nonstatic_memfn_p (const_tree, tsubst_flags_t);
extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t);
extern tree convert_ptrmem (tree, tree, bool, bool,
tsubst_flags_t);
extern int lvalue_or_else (tree, enum lvalue_use,
tsubst_flags_t);
extern void check_template_keyword (tree);
extern bool check_raw_literal_operator (const_tree decl);
extern bool check_literal_operator_args (const_tree, bool *, bool *);
extern void maybe_warn_about_useless_cast (tree, tree, tsubst_flags_t);
extern tree cp_perform_integral_promotions (tree, tsubst_flags_t);
/* in typeck2.c */
extern void require_complete_eh_spec_types (tree, tree);
extern void cxx_incomplete_type_diagnostic (const_tree, const_tree, diagnostic_t);
#undef cxx_incomplete_type_error
extern void cxx_incomplete_type_error (const_tree, const_tree);
#define cxx_incomplete_type_error(V,T) \
(cxx_incomplete_type_diagnostic ((V), (T), DK_ERROR))
extern tree error_not_base_type (tree, tree);
extern tree binfo_or_else (tree, tree);
extern void cxx_readonly_error (tree, enum lvalue_use);
extern void complete_type_check_abstract (tree);
extern int abstract_virtuals_error (tree, tree);
extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t);
extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int);
extern void check_narrowing (tree, tree);
extern tree digest_init (tree, tree, tsubst_flags_t);
extern tree digest_init_flags (tree, tree, int);
extern tree build_scoped_ref (tree, tree, tree *);
extern tree build_x_arrow (location_t, tree,
tsubst_flags_t);
extern tree build_m_component_ref (tree, tree, tsubst_flags_t);
extern tree build_functional_cast (tree, tree, tsubst_flags_t);
extern tree add_exception_specifier (tree, tree, int);
extern tree merge_exception_specifiers (tree, tree, tree);
/* in mangle.c */
extern void init_mangle (void);
extern void mangle_decl (tree);
extern const char *mangle_type_string (tree);
extern tree mangle_typeinfo_for_type (tree);
extern tree mangle_typeinfo_string_for_type (tree);
extern tree mangle_vtbl_for_type (tree);
extern tree mangle_vtt_for_type (tree);
extern tree mangle_ctor_vtbl_for_type (tree, tree);
extern tree mangle_thunk (tree, int, tree, tree);
extern tree mangle_conv_op_name_for_type (tree);
extern tree mangle_guard_variable (tree);
extern tree mangle_tls_init_fn (tree);
extern tree mangle_tls_wrapper_fn (tree);
extern bool decl_tls_wrapper_p (tree);
extern tree mangle_ref_init_variable (tree);
/* in dump.c */
extern bool cp_dump_tree (void *, tree);
/* In cp/cp-objcp-common.c. */
extern alias_set_type cxx_get_alias_set (tree);
extern bool cxx_warn_unused_global_decl (const_tree);
extern size_t cp_tree_size (enum tree_code);
extern bool cp_var_mod_type_p (tree, tree);
extern void cxx_initialize_diagnostics (diagnostic_context *);
extern int cxx_types_compatible_p (tree, tree);
extern void init_shadowed_var_for_decl (void);
extern bool cxx_block_may_fallthru (const_tree);
/* in cp-gimplify.c */
extern int cp_gimplify_expr (tree *, gimple_seq *,
gimple_seq *);
extern void cp_genericize (tree);
extern bool cxx_omp_const_qual_no_mutable (tree);
extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree);
extern tree cxx_omp_clause_default_ctor (tree, tree, tree);
extern tree cxx_omp_clause_copy_ctor (tree, tree, tree);
extern tree cxx_omp_clause_assign_op (tree, tree, tree);
extern tree cxx_omp_clause_dtor (tree, tree);
extern void cxx_omp_finish_clause (tree);
extern bool cxx_omp_privatize_by_reference (const_tree);
/* in name-lookup.c */
extern void suggest_alternatives_for (location_t, tree);
extern tree strip_using_decl (tree);
/* -- end of C++ */
#endif /* ! GCC_CP_TREE_H */
|
pi_cde.c | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <omp.h>
#define TRYS 5000000
/* One Monte-Carlo dart throw: draw a uniform random point in the unit
 * square [0,1]x[0,1] and report whether it lands inside the quarter
 * circle of radius 1.
 *
 * Returns 1 on a hit (x*x + y*y <= 1.0), 0 otherwise.
 *
 * NOTE(review): rand() keeps hidden global state and is not guaranteed
 * to be thread-safe; main() calls this from an OpenMP parallel region,
 * which may race or serialize on that state. Consider rand_r() with a
 * per-thread seed -- TODO confirm intended behavior before changing
 * the interface. */
static int throw(void)
{
    double x = (double)rand() / (double)RAND_MAX;
    double y = (double)rand() / (double)RAND_MAX;
    return (x * x + y * y <= 1.0) ? 1 : 0;
}
/* Estimate pi by Monte-Carlo sampling: TRYS dart throws distributed
 * over OpenMP threads; pi ~= 4 * hits / samples. */
int main(int argc, char **argv)
{
    // Thread count could be controlled via the environment
    // (OMP_NUM_THREADS=6); that is deliberately overridden here
    // by fixing the number of threads in code.
    omp_set_num_threads(6);
    int globalCount = 0, globalSamples = TRYS;
    // reduction(+:globalCount): every thread accumulates hits into a
    // private copy of globalCount; the copies are summed when the
    // parallel region ends.
#pragma omp parallel shared(globalSamples) reduction(+ \
                                                     : globalCount)
    {
#pragma omp for
        for (int i = 0; i < globalSamples; ++i)
        {
            // NOTE(review): throw() uses rand(), whose global state is
            // not guaranteed thread-safe -- confirm acceptable here.
            globalCount += throw();
        }
        // Inside the region, globalCount is still the per-thread
        // partial sum -- this prints each thread's own hit count.
#pragma omp critical
        printf("Thread %d treffer: %d\n", omp_get_thread_num(), globalCount);
    }
    // After the region, globalCount holds the reduced total of hits.
    double pi = 4.0 * (double)globalCount / (double)(globalSamples);
    printf("pi is %.9lf\n", pi);
    return 0;
}
|
perturbations.c | /** @file perturbations.c Documented perturbation module
*
* Julien Lesgourgues, 23.09.2010
*
* Deals with the perturbation evolution.
* This module has two purposes:
*
* - at the beginning; to initialize the perturbations, i.e. to
* integrate the perturbation equations, and store temporarily the terms
* contributing to the source functions as a function of conformal
* time. Then, to perform a few manipulations of these terms in order to
* infer the actual source functions \f$ S^{X} (k, \tau) \f$, and to
* store them as a function of conformal time inside an interpolation
* table.
*
* - at any time in the code; to evaluate the source functions at a
* given conformal time (by interpolating within the interpolation
* table).
*
* Hence the following functions can be called from other modules:
*
* -# perturb_init() at the beginning (but after background_init() and thermodynamics_init())
* -# perturb_sources_at_tau() at any later time
* -# perturb_free() at the end, when no more calls to perturb_sources_at_tau() are needed
*/
#include "perturbations.h"
/**
* Source function \f$ S^{X} (k, \tau) \f$ at a given conformal time tau.
*
* Evaluate source functions at given conformal time tau by reading
* the pre-computed table and interpolating.
*
* @param ppt Input: pointer to perturbation structure containing interpolation tables
* @param index_md Input: index of requested mode
* @param index_ic Input: index of requested initial condition
* @param index_type Input: index of requested source function type
* @param tau Input: any value of conformal time
* @param psource Output: vector (already allocated) of source function as a function of k
* @return the error status
*/
int perturb_sources_at_tau(
                           struct perturbs * ppt,
                           int index_md,
                           int index_ic,
                           int index_type,
                           double tau,
                           double * psource
                           ) {

  /** Summary: */

  /* Pre-computed table S^X(k,tau) for the requested
     (mode, initial condition, source type) triplet. */
  double * source_table;

  source_table = ppt->sources[index_md][index_ic * ppt->tp_size[index_md] + index_type];

  /** - interpolate in pre-computed table contained in ppt */

  class_call(array_interpolate_two_bis(ppt->tau_sampling,
                                       1,
                                       0,
                                       source_table,
                                       ppt->k_size[index_md],
                                       ppt->tau_size,
                                       tau,
                                       psource,
                                       ppt->k_size[index_md],
                                       ppt->error_message),
             ppt->error_message,
             ppt->error_message);

  return _SUCCESS_;
}
/**
* Initialize the perturbs structure, and in particular the table of source functions.
*
* Main steps:
*
* - given the values of the flags describing which kind of
* perturbations should be considered (modes: scalar/vector/tensor,
* initial conditions, type of source functions needed...),
* initialize indices and wavenumber list
*
* - define the time sampling for the output source functions
*
* - for each mode (scalar/vector/tensor): initialize the indices of
* relevant perturbations, integrate the differential system,
* compute and store the source functions.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Output: Initialized perturbation structure
* @return the error status
*/
int perturb_init(
                 struct precision * ppr,
                 struct background * pba,
                 struct thermo * pth,
                 struct perturbs * ppt
                 ) {

  /** Summary: */

  /** - define local variables */

  /* running index for modes */
  int index_md;
  /* running index for initial conditions */
  int index_ic;
  /* running index for wavenumbers */
  int index_k;
  /* pointer to one struct perturb_workspace per thread (one if no openmp) */
  struct perturb_workspace ** pppw;
  /* background quantities */
  double w_fld_ini, w_fld_0,dw_over_da_fld,integral_fld;
  /* number of threads (always one if no openmp) */
  int number_of_threads=1;
  /* index of the thread (always 0 if no openmp) */
  int thread=0;

  /* This code can be optionally compiled with the openmp option for parallel computation.
     Inside parallel regions, the use of the command "return" is forbidden.
     For error management, instead of "return _FAILURE_", we will set the variable below
     to "abort = _TRUE_". This will lead to a "return _FAILURE_" just after leaving the
     parallel region. */
  int abort;

  /* unsigned integer that will be set to the size of the workspace */
  size_t sz;

#ifdef _OPENMP
  /* instrumentation times */
  double tstart, tstop, tspent;
#endif

  /** - perform preliminary checks */

  if (ppt->has_perturbations == _FALSE_) {
    if (ppt->perturbations_verbose > 0)
      printf("No sources requested. Perturbation module skipped.\n");
    return _SUCCESS_;
  }
  else {
    if (ppt->perturbations_verbose > 0)
      printf("Computing sources\n");
  }

  /* gauge / approximation-scheme sanity checks; each class_test aborts
     with the given message when its condition is true */
  class_test((ppt->gauge == synchronous) && (pba->has_cdm == _FALSE_),
             ppt->error_message,
             "In the synchronous gauge, it is not self-consistent to assume no CDM: the later is used to define the initial timelike hypersurface. You can either add a negligible amount of CDM or switch to newtonian gauge");

  class_test ((ppr->tight_coupling_approximation < first_order_MB) ||
              (ppr->tight_coupling_approximation > compromise_CLASS),
              ppt->error_message,
              "your tight_coupling_approximation is set to %d, out of range defined in perturbations.h",ppr->tight_coupling_approximation);

  class_test ((ppr->radiation_streaming_approximation < rsa_null) ||
              (ppr->radiation_streaming_approximation > rsa_none),
              ppt->error_message,
              "your radiation_streaming_approximation is set to %d, out of range defined in perturbations.h",ppr->radiation_streaming_approximation);

  if (pba->has_ur == _TRUE_) {
    class_test ((ppr->ur_fluid_approximation < ufa_mb) ||
                (ppr->ur_fluid_approximation > ufa_none),
                ppt->error_message,
                "your ur_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ur_fluid_approximation);
  }

  if (pba->has_ncdm == _TRUE_) {
    class_test ((ppr->ncdm_fluid_approximation < ncdmfa_mb) ||
                (ppr->ncdm_fluid_approximation > ncdmfa_none),
                ppt->error_message,
                "your ncdm_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ncdm_fluid_approximation);
    if (ppt->has_nc_density == _TRUE_) {
      if (ppt->perturbations_verbose > 0) {
        fprintf(stdout," -> [WARNING:] You request the number count Cl's in presence of non-cold dark matter.\n Like in all previous CLASS and CLASSgal versions, this will be inferred from the total matter density,\n but it could make much more sense physically to compute it from the CDM+baryon density only.\n To get the latter behavior you would just need to change one line in transfer.c:\n search there for a comment starting with 'use here delta_cb'\n");
      }
    }
  }

  if (pba->has_fld == _TRUE_) {

    /* check values of w_fld at initial time and today */
    class_call(background_w_fld(pba, 0., &w_fld_ini,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
    class_call(background_w_fld(pba,pba->a_today,&w_fld_0,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);

    class_test(w_fld_ini >= 0.,
               ppt->error_message,
               "The fluid is meant to be negligible at early time, and unimportant for defining the initial conditions of other species. You are using parameters for which this assumption may break down, since at early times you have w_fld(a--->0) = %e >= 0",w_fld_ini);

    if (pba->use_ppf == _FALSE_) {
      class_test((w_fld_ini +1.0)*(w_fld_0+1.0) <= 0.0,
                 ppt->error_message,
                 "w crosses -1 between the infinite past and today, and this would lead to divergent perturbation equations for the fluid perturbations. Try to switch to PPF scheme: use_ppf = yes");

      /* the next check is meaningful at least for w(a) = w0 + wa*(1-a/a0); for general formulas and with use_ppf=no, you may prefer to comment it out... */
      class_test((w_fld_0 == -1.) && (dw_over_da_fld == 0.),
                 ppt->error_message,
                 "Your choice of a fluid with (w0,wa)=(-1,0) is not valid due to instabilities in the unphysical perturbations of such a fluid. Try instead with a plain cosmological constant or with PPF scheme: use_ppf = yes");
    }

  }

  if (pba->has_dcdm == _TRUE_) {
    class_test((ppt->has_cdi == _TRUE_) || (ppt->has_bi == _TRUE_) || (ppt->has_nid == _TRUE_) || (ppt->has_niv == _TRUE_),
               ppt->error_message,
               "Non-adiabatic initial conditions not coded in presence of decaying dark matter");
  }

  class_test(ppt->has_vectors == _TRUE_,
             ppt->error_message,
             "Vectors not coded yet");

  if ((ppt->has_niv == _TRUE_) && (ppt->perturbations_verbose > 0)) {
    printf("Warning: the niv initial conditions in CLASS (and also in CAMB) should still be double-checked: if you want to do it and send feedback, you are welcome!\n");
  }

  /* decide which extra species must be evolved for tensor modes,
     depending on the chosen tensor method */
  if (ppt->has_tensors == _TRUE_) {

    ppt->evolve_tensor_ur = _FALSE_;
    ppt->evolve_tensor_ncdm = _FALSE_;

    switch (ppt->tensor_method) {

    case (tm_photons_only):
      break;

    case (tm_massless_approximation):
      if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_))
        ppt->evolve_tensor_ur = _TRUE_;
      break;

    case (tm_exact):
      if (pba->has_ur == _TRUE_)
        ppt->evolve_tensor_ur = _TRUE_;
      if (pba->has_ncdm == _TRUE_)
        ppt->evolve_tensor_ncdm = _TRUE_;
      break;
    }
  }

  /** - initialize all indices and lists in perturbs structure using perturb_indices_of_perturbs() */

  class_call(perturb_indices_of_perturbs(ppr,
                                         pba,
                                         pth,
                                         ppt),
             ppt->error_message,
             ppt->error_message);

  if (ppt->z_max_pk > pth->z_rec) {

    class_test(ppt->has_cmb == _TRUE_,
               ppt->error_message,
               "You requested a very high z_pk=%e, higher than z_rec=%e. This works very well when you don't ask for a calculation of the CMB source function(s). Remove any CMB from your output and try e.g. with 'output=mTk' or 'output=mTk,vTk'",
               ppt->z_max_pk,
               pth->z_rec);

    class_test(ppt->has_source_delta_m == _TRUE_,
               ppt->error_message,
               "You requested a very high z_pk=%e, higher than z_rec=%e. This works very well when you ask only transfer functions, e.g. with 'output=mTk' or 'output=mTk,vTk'. But if you need the total matter (e.g. with 'mPk', 'dCl', etc.) there is an issue with the calculation of delta_m at very early times. By default, delta_m is a gauge-invariant variable (the density fluctuation in comoving gauge) and this quantity is hard to get accurately at very early times. The solution is to define delta_m as the density fluctuation in the current gauge, synchronous or newtonian. For the moment this must be done manually by commenting the line 'ppw->delta_m += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_m/k2;' in perturb_sources(). In the future there will be an option for doing it in an easier way.",
               ppt->z_max_pk,
               pth->z_rec);
  }

  /** - define the common time sampling for all sources using
      perturb_timesampling_for_sources() */

  class_call(perturb_timesampling_for_sources(ppr,
                                              pba,
                                              pth,
                                              ppt),
             ppt->error_message,
             ppt->error_message);

  /** - if we want to store perturbations, write titles and allocate storage */
  class_call(perturb_prepare_output(pba,ppt,ppr),
             ppt->error_message,
             ppt->error_message);

  /** - create an array of workspaces in multi-thread case */

#ifdef _OPENMP

  /************************/
  /* For use with CONCEPT */
  /************************/
  if (pba->num_threads != -1) {
    /**
     * Explicitly set the number of OpenMP threads.
     * Note that the value of OMP_NUM_THREADS is now completely ignored.
     */
    omp_set_num_threads(pba->num_threads);
  }
  /**************************/
  /* ^For use with CONCEPT^ */
  /**************************/

#pragma omp parallel
  {
    number_of_threads = omp_get_num_threads();
  }
#endif

  /* one workspace pointer slot per thread */
  class_alloc(pppw,number_of_threads * sizeof(struct perturb_workspace *),ppt->error_message);

  /** - loop over modes (scalar, tensors, etc). For each mode: */

  for (index_md = 0; index_md < ppt->md_size; index_md++) {

    if (ppt->perturbations_verbose > 1)
      printf("Evolving mode %d/%d\n",index_md+1,ppt->md_size);

    abort = _FALSE_;

    /* size of one workspace, consumed by class_alloc_parallel below */
    sz = sizeof(struct perturb_workspace);

#pragma omp parallel                                             \
  shared(pppw,ppr,pba,pth,ppt,index_md,abort,number_of_threads)  \
  private(thread)                                                \
  num_threads(number_of_threads)

    {

#ifdef _OPENMP
      thread=omp_get_thread_num();
#endif

      /** - --> (a) create a workspace (one per thread in multi-thread case) */

      class_alloc_parallel(pppw[thread],sz,ppt->error_message);

      /** - --> (b) initialize indices of vectors of perturbations with perturb_indices_of_current_vectors() */

      class_call_parallel(perturb_workspace_init(ppr,
                                                 pba,
                                                 pth,
                                                 ppt,
                                                 index_md,
                                                 pppw[thread]),
                          ppt->error_message,
                          ppt->error_message);

    } /* end of parallel region */

    /* a thread hit an error inside the parallel region: propagate it */
    if (abort == _TRUE_) return _FAILURE_;

    /** - --> (c) loop over initial conditions and wavenumbers; for each of them, evolve perturbations and compute source functions with perturb_solve() */

    for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {

      if (ppt->perturbations_verbose > 1) {
        printf("Evolving ic %d/%d\n",index_ic+1,ppt->ic_size[index_md]);
        printf("evolving %d wavenumbers\n",ppt->k_size[index_md]);
      }

      abort = _FALSE_;

#pragma omp parallel                                                    \
  shared(pppw,ppr,pba,pth,ppt,index_md,index_ic,abort,number_of_threads) \
  private(index_k,thread,tstart,tstop,tspent)                           \
  num_threads(number_of_threads)

      {

#ifdef _OPENMP
        thread=omp_get_thread_num();
        tspent=0.;
#endif

#pragma omp for schedule (dynamic)

        /* integrating backwards is slightly more optimal for parallel runs */
        //for (index_k = 0; index_k < ppt->k_size; index_k++) {
        for (index_k = ppt->k_size[index_md]-1; index_k >=0; index_k--) {

          /************************/
          /* For use with CONCEPT */
          /************************/
          if ((abort == _FALSE_) && (ppt->perturbations_verbose < 0)) {
            printf(
              "%*sNode %d, thread %d: Evolving mode k = %.3e/Mpc (%d/%d)\n",
              pba->indentation, "",
              pba->node,
              thread,
              ppt->k[index_md][index_k],
              index_k+1,
              ppt->k_size[index_md]
            );
            fflush(stdout);
          }
          /**************************/
          /* ^For use with CONCEPT^ */
          /**************************/

          if ((ppt->perturbations_verbose > 2) && (abort == _FALSE_)) {
            printf("evolving mode k=%e /Mpc (%d/%d)",ppt->k[index_md][index_k],index_k+1,ppt->k_size[index_md]);
            if (pba->sgnK != 0)
              printf(" (for scalar modes, corresponds to nu=%e)",sqrt(ppt->k[index_md][index_k]*ppt->k[index_md][index_k]+pba->K)/sqrt(pba->sgnK*pba->K));
            printf("\n");
          }

#ifdef _OPENMP
          tstart = omp_get_wtime();
#endif

          class_call_parallel(perturb_solve(ppr,
                                            pba,
                                            pth,
                                            ppt,
                                            index_md,
                                            index_ic,
                                            index_k,
                                            pppw[thread]),
                              ppt->error_message,
                              ppt->error_message);

#ifdef _OPENMP
          tstop = omp_get_wtime();

          tspent += tstop-tstart;
#endif

          /* make the shared abort flag visible to all threads so the
             remaining iterations can skip their verbose output */
#pragma omp flush(abort)

        } /* end of loop over wavenumbers */

#ifdef _OPENMP
        if (ppt->perturbations_verbose>1)
          printf("In %s: time spent in parallel region (loop over k's) = %e s for thread %d\n",
                 __func__,tspent,omp_get_thread_num());
#endif

      } /* end of parallel region */

      /* a thread failed while solving some wavenumber: propagate */
      if (abort == _TRUE_) return _FAILURE_;

    } /* end of loop over initial conditions */

    abort = _FALSE_;

#pragma omp parallel                                    \
  shared(pppw,ppt,index_md,abort,number_of_threads)     \
  private(thread)                                       \
  num_threads(number_of_threads)

    {

#ifdef _OPENMP
      thread=omp_get_thread_num();
#endif

      class_call_parallel(perturb_workspace_free(ppt,index_md,pppw[thread]),
                          ppt->error_message,
                          ppt->error_message);

    } /* end of parallel region */

    if (abort == _TRUE_) return _FAILURE_;

  } /* end loop over modes */

  free(pppw);

  return _SUCCESS_;
}
/**
* Free all memory space allocated by perturb_init().
*
* To be called at the end of each run, only when no further calls to
* perturb_sources_at_tau() are needed.
*
* @param ppt Input: perturbation structure to be freed
* @return the error status
*/
int perturb_free(
struct perturbs * ppt
) {
int index_md,index_ic,index_type;
int filenum;
if (ppt->has_perturbations == _TRUE_) {
for (index_md = 0; index_md < ppt->md_size; index_md++) {
for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {
for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) {
free(ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type]);
}
}
free(ppt->sources[index_md]);
free(ppt->k[index_md]);
}
free(ppt->tau_sampling);
free(ppt->tp_size);
free(ppt->ic_size);
free(ppt->k);
free(ppt->k_size_cmb);
free(ppt->k_size_cl);
free(ppt->k_size);
free(ppt->sources);
/** Stuff related to perturbations output: */
/** - Free non-NULL pointers */
if (ppt->index_k_output_values != NULL)
free(ppt->index_k_output_values);
for (filenum = 0; filenum<_MAX_NUMBER_OF_K_FILES_; filenum++){
if (ppt->scalar_perturbations_data[filenum] != NULL)
free(ppt->scalar_perturbations_data[filenum]);
if (ppt->vector_perturbations_data[filenum] != NULL)
free(ppt->vector_perturbations_data[filenum]);
if (ppt->tensor_perturbations_data[filenum] != NULL)
free(ppt->tensor_perturbations_data[filenum]);
}
}
return _SUCCESS_;
}
/**
* Initialize all indices and allocate most arrays in perturbs structure.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input/Output: Initialized perturbation structure
* @return the error status
*/
int perturb_indices_of_perturbs(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt
) {
/** Summary: */
/** - define local variables */
int index_type;
int index_md;
int index_ic;
int index_type_common;
/** - count modes (scalar, vector, tensor) and assign corresponding indices */
index_md = 0;
class_define_index(ppt->index_md_scalars,ppt->has_scalars,index_md,1);
class_define_index(ppt->index_md_vectors,ppt->has_vectors,index_md,1);
class_define_index(ppt->index_md_tensors,ppt->has_tensors,index_md,1);
ppt->md_size = index_md;
class_test(index_md == 0,
ppt->error_message,
"you should have at least one out of {scalars, vectors, tensors} !!!");
/** - allocate array of number of types for each mode, ppt->tp_size[index_md] */
class_alloc(ppt->tp_size,ppt->md_size*sizeof(int),ppt->error_message);
/** - allocate array of number of initial conditions for each mode, ppt->ic_size[index_md] */
class_alloc(ppt->ic_size,ppt->md_size*sizeof(int),ppt->error_message);
/** - allocate array of arrays of source functions for each mode, ppt->source[index_md] */
class_alloc(ppt->sources,ppt->md_size * sizeof(double *),ppt->error_message);
/** - initialization of all flags to false (will eventually be set to true later) */
ppt->has_cmb = _FALSE_;
ppt->has_lss = _FALSE_;
ppt->has_source_t = _FALSE_;
ppt->has_source_p = _FALSE_;
ppt->has_source_delta_m = _FALSE_;
ppt->has_source_delta_cb = _FALSE_;
ppt->has_source_delta_g = _FALSE_;
ppt->has_source_delta_b = _FALSE_;
ppt->has_source_delta_cdm = _FALSE_;
ppt->has_source_delta_dcdm = _FALSE_;
ppt->has_source_delta_fld = _FALSE_;
ppt->has_source_delta_scf = _FALSE_;
ppt->has_source_delta_dr = _FALSE_;
ppt->has_source_delta_ur = _FALSE_;
ppt->has_source_delta_ncdm = _FALSE_;
ppt->has_source_theta_m = _FALSE_;
ppt->has_source_theta_cb = _FALSE_;
ppt->has_source_theta_g = _FALSE_;
ppt->has_source_theta_b = _FALSE_;
ppt->has_source_theta_cdm = _FALSE_;
ppt->has_source_theta_dcdm = _FALSE_;
ppt->has_source_theta_fld = _FALSE_;
ppt->has_source_theta_scf = _FALSE_;
ppt->has_source_theta_dr = _FALSE_;
ppt->has_source_theta_ur = _FALSE_;
ppt->has_source_theta_ncdm = _FALSE_;
ppt->has_source_phi = _FALSE_;
ppt->has_source_phi_prime = _FALSE_;
ppt->has_source_phi_plus_psi = _FALSE_;
ppt->has_source_psi = _FALSE_;
ppt->has_source_h = _FALSE_;
ppt->has_source_h_prime = _FALSE_;
ppt->has_source_eta = _FALSE_;
ppt->has_source_eta_prime = _FALSE_;
/** - source flags and indices, for sources that all modes have in
common (temperature, polarization, ...). For temperature, the
term t2 is always non-zero, while other terms are non-zero only
for scalars and vectors. For polarization, the term e is always
non-zero, while the term b is only for vectors and tensors. */
if (ppt->has_cl_cmb_temperature == _TRUE_) {
ppt->has_source_t = _TRUE_;
ppt->has_cmb = _TRUE_;
}
if (ppt->has_cl_cmb_polarization == _TRUE_) {
ppt->has_source_p = _TRUE_;
ppt->has_cmb = _TRUE_;
}
index_type = 0;
class_define_index(ppt->index_tp_t2,ppt->has_source_t,index_type,1);
class_define_index(ppt->index_tp_p,ppt->has_source_p,index_type,1);
index_type_common = index_type;
/* indices for perturbed recombination */
class_define_index(ppt->index_tp_perturbed_recombination_delta_temp,ppt->has_perturbed_recombination,index_type,1);
class_define_index(ppt->index_tp_perturbed_recombination_delta_chi,ppt->has_perturbed_recombination,index_type,1);
/** - define k values with perturb_get_k_list() */
class_call(perturb_get_k_list(ppr,
pba,
pth,
ppt),
ppt->error_message,
ppt->error_message);
/** - loop over modes. Initialize flags and indices which are specific to each mode. */
for (index_md = 0; index_md < ppt->md_size; index_md++) {
/** - (a) scalars */
if (_scalars_) {
/** - --> source flags and indices, for sources that are specific to scalars */
if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) || (ppt->has_cl_lensing_potential)) {
ppt->has_lss = _TRUE_;
ppt->has_source_phi_plus_psi = _TRUE_;
}
if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_nl_corrections_based_on_delta_m)) {
ppt->has_lss = _TRUE_;
ppt->has_source_delta_m = _TRUE_;
if (pba->has_ncdm == _TRUE_){
ppt->has_source_delta_cb = _TRUE_;
}
}
if (ppt->has_density_transfers == _TRUE_) {
ppt->has_lss = _TRUE_;
ppt->has_source_delta_g = _TRUE_;
ppt->has_source_delta_b = _TRUE_;
if (pba->has_cdm == _TRUE_)
ppt->has_source_delta_cdm = _TRUE_;
if (pba->has_dcdm == _TRUE_)
ppt->has_source_delta_dcdm = _TRUE_;
if (pba->has_fld == _TRUE_)
ppt->has_source_delta_fld = _TRUE_;
if (pba->has_scf == _TRUE_)
ppt->has_source_delta_scf = _TRUE_;
if (pba->has_ur == _TRUE_)
ppt->has_source_delta_ur = _TRUE_;
if (pba->has_dr == _TRUE_)
ppt->has_source_delta_dr = _TRUE_;
if (pba->has_ncdm == _TRUE_)
ppt->has_source_delta_ncdm = _TRUE_;
// Thanks to the following lines, (phi,psi) are also stored as sources
// (Obtained directly in newtonian gauge, inferred from (h,eta) in synchronous gauge).
// If density transfer functions are requested in the (default) CLASS format,
// (phi, psi) will be appended to the delta_i's in the final output.
ppt->has_source_phi = _TRUE_;
ppt->has_source_psi = _TRUE_;
}
if (ppt->has_velocity_transfers == _TRUE_) {
ppt->has_lss = _TRUE_;
ppt->has_source_theta_g = _TRUE_;
ppt->has_source_theta_b = _TRUE_;
if ((pba->has_cdm == _TRUE_) && (ppt->gauge != synchronous))
ppt->has_source_theta_cdm = _TRUE_;
if (pba->has_dcdm == _TRUE_)
ppt->has_source_theta_dcdm = _TRUE_;
if (pba->has_fld == _TRUE_)
ppt->has_source_theta_fld = _TRUE_;
if (pba->has_scf == _TRUE_)
ppt->has_source_theta_scf = _TRUE_;
if (pba->has_ur == _TRUE_)
ppt->has_source_theta_ur = _TRUE_;
if (pba->has_dr == _TRUE_)
ppt->has_source_theta_dr = _TRUE_;
if (pba->has_ncdm == _TRUE_)
ppt->has_source_theta_ncdm = _TRUE_;
}
if (ppt->has_cl_number_count == _TRUE_) {
ppt->has_lss = _TRUE_;
if (ppt->has_nc_density == _TRUE_) {
ppt->has_source_delta_m = _TRUE_;
}
if (ppt->has_nc_rsd == _TRUE_) {
ppt->has_source_theta_m = _TRUE_;
if (pba->has_ncdm == _TRUE_)
/* we may not need theta_cb at all, rsd always defined for
the total matter, but at least this is made
available */
ppt->has_source_theta_cb = _TRUE_;
}
if (ppt->has_nc_lens == _TRUE_) {
ppt->has_source_phi_plus_psi = _TRUE_;
}
if (ppt->has_nc_gr == _TRUE_) {
ppt->has_source_phi = _TRUE_;
ppt->has_source_psi = _TRUE_;
ppt->has_source_phi_prime = _TRUE_;
ppt->has_source_phi_plus_psi = _TRUE_;
}
}
if ( ppt->has_metricpotential_transfers == _TRUE_ ) {
if (ppt->gauge == newtonian) {
ppt->has_source_phi = _TRUE_;
ppt->has_source_psi = _TRUE_;
ppt->has_source_phi_prime = _TRUE_;
}
if (ppt->gauge == synchronous) {
ppt->has_source_h = _TRUE_;
ppt->has_source_h_prime = _TRUE_;
ppt->has_source_eta = _TRUE_;
ppt->has_source_eta_prime = _TRUE_;
}
}
index_type = index_type_common;
class_define_index(ppt->index_tp_t0, ppt->has_source_t, index_type,1);
class_define_index(ppt->index_tp_t1, ppt->has_source_t, index_type,1);
class_define_index(ppt->index_tp_delta_m, ppt->has_source_delta_m, index_type,1);
class_define_index(ppt->index_tp_delta_cb, ppt->has_source_delta_cb, index_type,1);
class_define_index(ppt->index_tp_delta_g, ppt->has_source_delta_g, index_type,1);
class_define_index(ppt->index_tp_delta_b, ppt->has_source_delta_b, index_type,1);
class_define_index(ppt->index_tp_delta_cdm, ppt->has_source_delta_cdm, index_type,1);
class_define_index(ppt->index_tp_delta_dcdm, ppt->has_source_delta_dcdm,index_type,1);
class_define_index(ppt->index_tp_delta_fld, ppt->has_source_delta_fld, index_type,1);
class_define_index(ppt->index_tp_delta_scf, ppt->has_source_delta_scf, index_type,1);
class_define_index(ppt->index_tp_delta_dr, ppt->has_source_delta_dr, index_type,1);
class_define_index(ppt->index_tp_delta_ur, ppt->has_source_delta_ur, index_type,1);
class_define_index(ppt->index_tp_delta_ncdm1,ppt->has_source_delta_ncdm,index_type,pba->N_ncdm);
class_define_index(ppt->index_tp_theta_m, ppt->has_source_theta_m, index_type,1);
class_define_index(ppt->index_tp_theta_cb, ppt->has_source_theta_cb, index_type,1);
class_define_index(ppt->index_tp_theta_g, ppt->has_source_theta_g, index_type,1);
class_define_index(ppt->index_tp_theta_b, ppt->has_source_theta_b, index_type,1);
class_define_index(ppt->index_tp_theta_cdm, ppt->has_source_theta_cdm, index_type,1);
class_define_index(ppt->index_tp_theta_dcdm, ppt->has_source_theta_dcdm,index_type,1);
class_define_index(ppt->index_tp_theta_fld, ppt->has_source_theta_fld, index_type,1);
class_define_index(ppt->index_tp_theta_scf, ppt->has_source_theta_scf, index_type,1);
class_define_index(ppt->index_tp_theta_dr, ppt->has_source_theta_dr, index_type,1);
class_define_index(ppt->index_tp_theta_ur, ppt->has_source_theta_ur, index_type,1);
class_define_index(ppt->index_tp_theta_ncdm1,ppt->has_source_theta_ncdm,index_type,pba->N_ncdm);
class_define_index(ppt->index_tp_phi, ppt->has_source_phi, index_type,1);
class_define_index(ppt->index_tp_phi_prime, ppt->has_source_phi_prime, index_type,1);
class_define_index(ppt->index_tp_phi_plus_psi,ppt->has_source_phi_plus_psi,index_type,1);
class_define_index(ppt->index_tp_psi, ppt->has_source_psi, index_type,1);
class_define_index(ppt->index_tp_h, ppt->has_source_h, index_type,1);
class_define_index(ppt->index_tp_h_prime, ppt->has_source_h_prime, index_type,1);
class_define_index(ppt->index_tp_eta, ppt->has_source_eta, index_type,1);
class_define_index(ppt->index_tp_eta_prime, ppt->has_source_eta_prime, index_type,1);
ppt->tp_size[index_md] = index_type;
class_test(index_type == 0,
ppt->error_message,
"inconsistent input: you asked for scalars, so you should have at least one non-zero scalar source type (temperature, polarization, lensing/gravitational potential, ...). Please adjust your input.");
/** - --> count scalar initial conditions (for scalars: ad, cdi, nid, niv; for tensors: only one) and assign corresponding indices */
index_ic = 0;
class_define_index(ppt->index_ic_ad, ppt->has_ad, index_ic,1);
class_define_index(ppt->index_ic_bi, ppt->has_bi, index_ic,1);
class_define_index(ppt->index_ic_cdi,ppt->has_cdi,index_ic,1);
class_define_index(ppt->index_ic_nid,ppt->has_nid,index_ic,1);
class_define_index(ppt->index_ic_niv,ppt->has_niv,index_ic,1);
ppt->ic_size[index_md] = index_ic;
class_test(index_ic == 0,
ppt->error_message,
"you should have at least one adiabatic or isocurvature initial condition...} !!!");
}
/** - (b) vectors */
if (_vectors_) {
/** - --> source flags and indices, for sources that are specific to vectors */
index_type = index_type_common;
class_define_index(ppt->index_tp_t1,ppt->has_source_t,index_type,1);
ppt->tp_size[index_md] = index_type;
/*
class_test(index_type == 0,
ppt->error_message,
"inconsistent input: you asked for vectors, so you should have at least one non-zero vector source type (temperature or polarization). Please adjust your input.");
*/
/** - --> initial conditions for vectors*/
index_ic = 0;
/* not coded yet */
ppt->ic_size[index_md] = index_ic;
}
/** - (c) tensors */
if (_tensors_) {
/** - --> source flags and indices, for sources that are specific to tensors */
index_type = index_type_common;
/* nothing specific, unlike for vectors and scalars! */
ppt->tp_size[index_md] = index_type;
/*
class_test(index_type == 0,
ppt->error_message,
"inconsistent input: you asked for tensors, so you should have at least one non-zero tensor source type (temperature or polarization). Please adjust your input.");
*/
/** - --> only one initial condition for tensors*/
index_ic = 0;
class_define_index(ppt->index_ic_ten,_TRUE_,index_ic,1);
ppt->ic_size[index_md] = index_ic;
}
/** - (d) for each mode, allocate array of arrays of source functions for each initial conditions and wavenumber, (ppt->source[index_md])[index_ic][index_type] */
class_alloc(ppt->sources[index_md],
ppt->ic_size[index_md] * ppt->tp_size[index_md] * sizeof(double *),
ppt->error_message);
}
return _SUCCESS_;
}
/**
* Define time sampling for source functions.
*
* For each type, compute the list of values of tau at which sources
* will be sampled. Knowing the number of tau values, allocate all
* arrays of source functions.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input/Output: Initialized perturbation structure
* @return the error status
*/
int perturb_timesampling_for_sources(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt
) {
/** Summary: */
/** - define local variables */
/* NOTE(review): the time sampling is built in TWO identical passes:
pass 1 only counts the sampling points (to size ppt->tau_sampling),
pass 2 re-runs exactly the same stepping logic and stores each tau.
If either pass is modified, the other must be kept in strict sync,
otherwise the array is over- or under-filled. */
int counter;
int index_md;
int index_type;
int index_ic;
int last_index_back;
int last_index_thermo;
int first_index_back;
int first_index_thermo;
double tau;
double tau_ini;
double tau_lower;
double tau_upper;
double tau_mid;
double timescale_source;
double rate_thermo;
double rate_isw_squared;
double a_prime_over_a;
double a_primeprime_over_a;
double * pvecback;
double * pvecthermo;
/** - allocate background/thermodynamics vectors */
class_alloc(pvecback,pba->bg_size_short*sizeof(double),ppt->error_message);
class_alloc(pvecthermo,pth->th_size*sizeof(double),ppt->error_message);
/** - first, just count the number of sampling points in order to allocate the array containing all values */
/** - (a) if CMB requested, first sampling point = when the universe
stops being opaque; otherwise, start sampling gravitational
potential at recombination [however, if perturbed recombination
is requested, we also need to start the system before
recombination. Otherwise, the initial conditions for gas
temperature and ionization fraction perturbations (delta_T = 1/3
delta_b, delta_x_e) are not valid]. */
if ((ppt->has_cmb == _TRUE_)||(ppt->has_perturbed_recombination == _TRUE_)) {
/* using bisection, search time tau such that the ratio of thermo
to Hubble time scales tau_c/tau_h=aH/kappa' is equal to
start_sources_at_tau_c_over_tau_h */
/* lower bracket: the earliest time at which thermodynamics are known */
tau_lower = pth->tau_ini;
class_call(background_at_tau(pba,
tau_lower,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
/* sanity check: the target ratio must be bracketed from below at tau_lower */
class_test(pvecback[pba->index_bg_a]*
pvecback[pba->index_bg_H]/
pvecthermo[pth->index_th_dkappa] >
ppr->start_sources_at_tau_c_over_tau_h,
ppt->error_message,
"your choice of initial time for computing sources is inappropriate: it corresponds to an earlier time than the one at which the integration of thermodynamical variables started (tau=%g). You should increase either 'start_sources_at_tau_c_over_tau_h' or 'recfast_z_initial'\n",
tau_lower);
/* upper bracket: recombination time */
tau_upper = pth->tau_rec;
class_call(background_at_tau(pba,
tau_upper,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
/* sanity check: the target ratio must be bracketed from above at tau_upper */
class_test(pvecback[pba->index_bg_a]*
pvecback[pba->index_bg_H]/
pvecthermo[pth->index_th_dkappa] <
ppr->start_sources_at_tau_c_over_tau_h,
ppt->error_message,
"your choice of initial time for computing sources is inappropriate: it corresponds to a time after recombination. You should decrease 'start_sources_at_tau_c_over_tau_h'\n");
/* bisection loop: shrink [tau_lower,tau_upper] until its width is
below tol_tau_approx; the root is where aH/kappa' crosses
start_sources_at_tau_c_over_tau_h */
tau_mid = 0.5*(tau_lower + tau_upper);
while (tau_upper - tau_lower > ppr->tol_tau_approx) {
class_call(background_at_tau(pba,
tau_mid,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
if (pvecback[pba->index_bg_a]*
pvecback[pba->index_bg_H]/
pvecthermo[pth->index_th_dkappa] >
ppr->start_sources_at_tau_c_over_tau_h)
tau_upper = tau_mid;
else
tau_lower = tau_mid;
tau_mid = 0.5*(tau_lower + tau_upper);
}
/* bisection converged: first source sampling time */
tau_ini = tau_mid;
}
else {
/* check the time corresponding to the highest redshift requested in output plus one */
class_call(background_tau_of_z(pba,
ppt->z_max_pk+1,
&tau_ini),
pba->error_message,
ppt->error_message);
/* obsolete: previous choice was to start always at recombination time */
/* tau_ini = pth->tau_rec; */
/* set values of first_index_back/thermo */
class_call(background_at_tau(pba,
tau_ini,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
}
/** - (b) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where:
- --> if CMB requested:
timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$;
timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$ (to sample correctly the late ISW effect); and
timescale_source=1/(1/timescale_source1+1/timescale_source2); repeat till today.
- --> if CMB not requested:
timescale_source = 1/aH; repeat till today.
*/
/* pass 1: count points; tau_ini itself is the first point, hence counter
starts at one */
counter = 1;
last_index_back = first_index_back;
last_index_thermo = first_index_thermo;
tau = tau_ini;
while (tau < pba->conformal_age) {
class_call(background_at_tau(pba,
tau,
pba->short_info,
pba->inter_closeby,
&last_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_closeby,
&last_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
if (ppt->has_cmb == _TRUE_) {
/* variation rate of thermodynamics variables */
rate_thermo = pvecthermo[pth->index_th_rate];
/* variation rate of metric due to late ISW effect (important at late times) */
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
+ 2. * a_prime_over_a * a_prime_over_a;
rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a);
/* compute rate */
/* NOTE(review): the two rates are combined in quadrature,
sqrt(r1^2+r2^2), which differs slightly from the harmonic-sum
formula quoted in the comment block above — confirm intended */
timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared);
}
else {
/* variation rate given by Hubble time */
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
timescale_source = a_prime_over_a;
}
/* check it is non-zero */
class_test(timescale_source == 0.,
ppt->error_message,
"null evolution rate, integration is diverging");
/* compute inverse rate */
timescale_source = 1./timescale_source;
class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation,
ppt->error_message,
"integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source);
tau = tau + ppr->perturb_sampling_stepsize*timescale_source;
counter++;
}
/** - --> infer total number of time steps, ppt->tau_size */
ppt->tau_size = counter;
/** - --> allocate array of time steps, ppt->tau_sampling[index_tau] */
class_alloc(ppt->tau_sampling,ppt->tau_size * sizeof(double),ppt->error_message);
/** - --> repeat the same steps, now filling the array with each tau value: */
/** - --> (b.1.) first sampling point = when the universe stops being opaque */
/* pass 2: same stepping as pass 1, now storing each tau; counter is
re-used as the current array index (0-based) */
counter = 0;
ppt->tau_sampling[counter]=tau_ini;
/** - --> (b.2.) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where
timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$;
timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$ (to sample correctly the late ISW effect); and
timescale_source=1/(1/timescale_source1+1/timescale_source2); repeat till today.
If CMB not requested:
timescale_source = 1/aH; repeat till today. */
last_index_back = first_index_back;
last_index_thermo = first_index_thermo;
tau = tau_ini;
while (tau < pba->conformal_age) {
class_call(background_at_tau(pba,
tau,
pba->short_info,
pba->inter_closeby,
&last_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_closeby,
&last_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
if (ppt->has_cmb == _TRUE_) {
/* variation rate of thermodynamics variables */
rate_thermo = pvecthermo[pth->index_th_rate];
/* variation rate of metric due to late ISW effect (important at late times) */
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
+ 2. * a_prime_over_a * a_prime_over_a;
rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a);
/* compute rate */
timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared);
}
else {
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
timescale_source = a_prime_over_a;
}
/* check it is non-zero */
class_test(timescale_source == 0.,
ppt->error_message,
"null evolution rate, integration is diverging");
/* compute inverse rate */
timescale_source = 1./timescale_source;
class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation,
ppt->error_message,
"integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source);
tau = tau + ppr->perturb_sampling_stepsize*timescale_source;
counter++;
ppt->tau_sampling[counter]=tau;
}
/** - last sampling point = exactly today */
/* the last tau stored inside the loop overshoots today's conformal time
(that is what terminated the loop); overwrite slot counter, which is
tau_size-1 since both passes take identical steps, with today exactly */
ppt->tau_sampling[counter] = pba->conformal_age;
free(pvecback);
free(pvecthermo);
/** - loop over modes, initial conditions and types. For each of
them, allocate array of source functions. */
for (index_md = 0; index_md < ppt->md_size; index_md++) {
for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {
for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) {
/* sources[index_md] is a flattened 2D array of pointers,
indexed as [index_ic * tp_size + index_type]; each entry holds
k_size * tau_size doubles */
class_alloc(ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type],
ppt->k_size[index_md] * ppt->tau_size * sizeof(double),
ppt->error_message);
}
}
}
return _SUCCESS_;
}
/**
* Define the number of comoving wavenumbers (as well as the list of
* their values) using the information passed in the precision structure.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input/Output: pointer to perturbation structure (k_size arrays and k lists are allocated and filled here)
* @return the error status
*/
int perturb_get_k_list(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt
) {
int index_k, index_k_output, index_mode;
double k,k_min=0.,k_rec,step,tau1;
double * k_max_cmb;
double * k_max_cl;
double k_max=0.;
double scale2;
double *tmp_k_list;
int newk_size, index_newk, add_k_output_value;
/** Summary: */
class_test(ppr->k_step_transition == 0.,
ppt->error_message,
"stop to avoid division by zero");
class_test(pth->rs_rec == 0.,
ppt->error_message,
"stop to avoid division by zero");
/** - allocate arrays related to k list for each mode */
class_alloc(ppt->k_size_cmb,
ppt->md_size*sizeof(int),
ppt->error_message);
class_alloc(ppt->k_size_cl,
ppt->md_size*sizeof(int),
ppt->error_message);
class_alloc(ppt->k_size,
ppt->md_size*sizeof(int),
ppt->error_message);
class_alloc(ppt->k,
ppt->md_size*sizeof(double*),
ppt->error_message);
class_calloc(k_max_cmb,
ppt->md_size,
sizeof(double),
ppt->error_message);
class_calloc(k_max_cl,
ppt->md_size,
sizeof(double),
ppt->error_message);
/** - scalar modes */
if (ppt->has_scalars == _TRUE_) {
/* first value */
if (pba->sgnK == 0) {
/* K<0 (flat) : start close to zero */
k_min=ppr->k_min_tau0/pba->conformal_age;
}
else if (pba->sgnK == -1) {
/* K<0 (open) : start close to sqrt(-K)
(in transfer modules, for scalars, this will correspond to q close to zero;
for vectors and tensors, this value is even smaller than the minimum necessary value) */
k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2));
}
else if (pba->sgnK == 1) {
/* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */
k_min = sqrt((8.-1.e-4)*pba->K);
}
/** - --> find k_max (as well as k_max_cmb[ppt->index_md_scalars], k_max_cl[ppt->index_md_scalars]) */
k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */
k_max_cmb[ppt->index_md_scalars] = k_min;
k_max_cl[ppt->index_md_scalars] = k_min;
k_max = k_min;
if (ppt->has_cls == _TRUE_) {
/* find k_max_cmb[ppt->index_md_scalars] : */
/* choose a k_max_cmb[ppt->index_md_scalars] corresponding to a wavelength on the last
scattering surface seen today under an angle smaller than
pi/lmax: this is equivalent to
k_max_cl[ppt->index_md_scalars]*[comvoving.ang.diameter.distance] > l_max */
k_max_cmb[ppt->index_md_scalars] = ppr->k_max_tau0_over_l_max*ppt->l_scalar_max
/pba->conformal_age/pth->angular_rescaling;
k_max_cl[ppt->index_md_scalars] = k_max_cmb[ppt->index_md_scalars];
k_max = k_max_cmb[ppt->index_md_scalars];
/* find k_max_cl[ppt->index_md_scalars] : */
/* if we need density/lensing Cl's, we must impose a stronger condition,
such that the minimum wavelength on the shell corresponding
to the center of smallest redshift bin is seen under an
angle smaller than pi/lmax. So we must multiply our previous
k_max_cl[ppt->index_md_scalars] by the ratio tau0/(tau0-tau[center of smallest
redshift bin]). Note that we could do the same with the
lensing potential if we needed a very precise C_l^phi-phi at
large l. We don't do it by default, because the lensed ClT,
ClE would be marginally affected. */
if ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_)) {
class_call(background_tau_of_z(pba,
ppt->selection_mean[0],
&tau1),
pba->error_message,
ppt->error_message);
k_max_cl[ppt->index_md_scalars] = MAX(k_max_cl[ppt->index_md_scalars],ppr->k_max_tau0_over_l_max*ppt->l_lss_max/(pba->conformal_age-tau1)); // to be very accurate we should use angular diameter distance to given redshift instead of comoving radius: would implement corrections depending on curvature
k_max = k_max_cl[ppt->index_md_scalars];
}
}
/* find k_max: */
if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_))
k_max = MAX(k_max,ppt->k_max_for_pk);
if (ppt->has_nl_corrections_based_on_delta_m == _TRUE_)
k_max = MAX(k_max,ppr->halofit_min_k_max);
/** - --> test that result for k_min, k_max make sense */
class_test(k_min<0.,
ppt->error_message,
"buggy definition of k_min");
class_test(k_max<0.,
ppt->error_message,
"buggy definition of k_max");
class_test(k_max<k_min,
ppt->error_message,
"buggy definition of k_min and/or k_max");
/* if K>0, the transfer function will be calculated for discrete
integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and
m=0,1,2 for scalars/vectors/tensors. However we are free to
define in the perturbation module some arbitrary values of k:
later on, the transfer module will interpolate at values of k
corresponding exactly to integer values of nu. Hence, apart
from the value of k_min and the step size in the vicinity of
k_min, we define exactly the same sampling in the three cases
K=0, K<0, K>0 */
/* allocate array with, for the moment, the largest possible size */
class_alloc(ppt->k[ppt->index_md_scalars],
((int)((k_max_cmb[ppt->index_md_scalars]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+
(int)(MAX(ppr->k_per_decade_for_pk,ppr->k_per_decade_for_bao)*log(k_max/k_min)/log(10.))+3)
*sizeof(double),ppt->error_message);
/* first value */
index_k=0;
k = k_min;
ppt->k[ppt->index_md_scalars][index_k] = k;
index_k++;
/* values until k_max_cmb[ppt->index_md_scalars] */
while (k < k_max_cmb[ppt->index_md_scalars]) {
/* the linear step is not constant, it has a step-like shape,
centered around the characteristic scale set by the sound
horizon at recombination (associated to the comoving wavenumber
k_rec) */
step = (ppr->k_step_super
+ 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.)
* (ppr->k_step_sub-ppr->k_step_super)) * k_rec;
/* there is one other thing to take into account in the step
size. There are two other characteristic scales that matter for
the sampling: the Hubble scale today, k0=a0H0, and eventually
curvature scale sqrt(|K|). We define "scale2" as the sum of the
squared Hubble radius and squared curvature radius. We need to
increase the sampling for k<sqrt(scale2), in order to get the
first mutipoles accurate enough. The formula below reduces it
gradually in the k-->0 limit, by up to a factor 10. The actual
stepsize is still fixed by k_step_super, this is just a
reduction factor. */
scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K);
step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction);
class_test(step / k < ppr->smallest_allowed_variation,
ppt->error_message,
"k step =%e < machine precision : leads either to numerical error or infinite loop",
step * k_rec);
k += step;
class_test(k <= ppt->k[ppt->index_md_scalars][index_k-1],
ppt->error_message,
"consecutive values of k should differ and should be in growing order");
ppt->k[ppt->index_md_scalars][index_k] = k;
index_k++;
}
ppt->k_size_cmb[ppt->index_md_scalars] = index_k;
/* values until k_max_cl[ppt->index_md_scalars] */
while (k < k_max_cl[ppt->index_md_scalars]) {
k *= pow(10.,1./(ppr->k_per_decade_for_pk
+(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk)
*(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4)))));
ppt->k[ppt->index_md_scalars][index_k] = k;
index_k++;
}
ppt->k_size_cl[ppt->index_md_scalars] = index_k;
/* values until k_max */
while (k < k_max) {
k *= pow(10.,1./(ppr->k_per_decade_for_pk
+(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk)
*(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4)))));
ppt->k[ppt->index_md_scalars][index_k] = k;
index_k++;
}
ppt->k_size[ppt->index_md_scalars] = index_k;
class_realloc(ppt->k[ppt->index_md_scalars],
ppt->k[ppt->index_md_scalars],
ppt->k_size[ppt->index_md_scalars]*sizeof(double),
ppt->error_message);
}
/** - vector modes */
if (ppt->has_vectors == _TRUE_) {
/* first value */
if (pba->sgnK == 0) {
/* K<0 (flat) : start close to zero */
k_min=ppr->k_min_tau0/pba->conformal_age;
}
else if (pba->sgnK == -1) {
/* K<0 (open) : start close to sqrt(-K)
(in transfer modules, for scalars, this will correspond to q close to zero;
for vectors and tensors, this value is even smaller than the minimum necessary value) */
k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2));
}
else if (pba->sgnK == 1) {
/* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */
k_min = sqrt((7.-1.e-4)*pba->K);
}
/** - --> find k_max (as well as k_max_cmb[ppt->index_md_vectors], k_max_cl[ppt->index_md_vectors]) */
k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */
k_max_cmb[ppt->index_md_vectors] = k_min;
k_max_cl[ppt->index_md_vectors] = k_min;
k_max = k_min;
if (ppt->has_cls == _TRUE_) {
/* find k_max_cmb: */
/* choose a k_max_cmb corresponding to a wavelength on the last
scattering surface seen today under an angle smaller than
pi/lmax: this is equivalent to
k_max_cl*[comvoving.ang.diameter.distance] > l_max */
k_max_cmb[ppt->index_md_vectors] = ppr->k_max_tau0_over_l_max*ppt->l_vector_max
/pba->conformal_age/pth->angular_rescaling;
k_max_cl[ppt->index_md_vectors] = k_max_cmb[ppt->index_md_vectors];
k_max = k_max_cmb[ppt->index_md_vectors];
}
/** - --> test that result for k_min, k_max make sense */
class_test(k_min<0.,
ppt->error_message,
"buggy definition of k_min");
class_test(k_max<0.,
ppt->error_message,
"buggy definition of k_max");
class_test(k_max<k_min,
ppt->error_message,
"buggy definition of k_min and/or k_max");
/* if K>0, the transfer function will be calculated for discrete
integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and
m=0,1,2 for scalars/vectors/tensors. However we are free to
define in the perturbation module some arbitrary values of k:
later on, the transfer module will interpolate at values of k
corresponding exactly to integer values of nu. Hence, apart
from the value of k_min and the step size in the vicinity of
k_min, we define exactly the same sampling in the three cases
K=0, K<0, K>0 */
/* allocate array with, for the moment, the largest possible size */
class_alloc(ppt->k[ppt->index_md_vectors],
((int)((k_max_cmb[ppt->index_md_vectors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1)
*sizeof(double),ppt->error_message);
/* first value */
index_k=0;
k = k_min;
ppt->k[ppt->index_md_vectors][index_k] = k;
index_k++;
/* values until k_max_cmb[ppt->index_md_vectors] */
while (k < k_max_cmb[ppt->index_md_vectors]) {
/* the linear step is not constant, it has a step-like shape,
centered around the characteristic scale set by the sound
horizon at recombination (associated to the comoving wavenumber
k_rec) */
step = (ppr->k_step_super
+ 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.)
* (ppr->k_step_sub-ppr->k_step_super)) * k_rec;
/* there is one other thing to take into account in the step
size. There are two other characteristic scales that matter for
the sampling: the Hubble scale today, k0=a0H0, and eventually
curvature scale sqrt(|K|). We define "scale2" as the sum of the
squared Hubble radius and squared curvature radius. We need to
increase the sampling for k<sqrt(scale2), in order to get the
first mutipoles accurate enough. The formula below reduces it
gradually in the k-->0 limit, by up to a factor 10. The actual
stepsize is still fixed by k_step_super, this is just a
reduction factor. */
scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K);
step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction);
class_test(step / k < ppr->smallest_allowed_variation,
ppt->error_message,
"k step =%e < machine precision : leads either to numerical error or infinite loop",
step * k_rec);
k += step;
class_test(k <= ppt->k[ppt->index_md_scalars][index_k-1],
ppt->error_message,
"consecutive values of k should differ and should be in growing order");
ppt->k[ppt->index_md_vectors][index_k] = k;
index_k++;
}
ppt->k_size_cmb[ppt->index_md_vectors] = index_k;
ppt->k_size_cl[ppt->index_md_vectors] = index_k;
ppt->k_size[ppt->index_md_vectors] = index_k;
class_realloc(ppt->k[ppt->index_md_vectors],
ppt->k[ppt->index_md_vectors],
ppt->k_size[ppt->index_md_vectors]*sizeof(double),
ppt->error_message);
}
/** - tensor modes */
if (ppt->has_tensors == _TRUE_) {
/* first value */
if (pba->sgnK == 0) {
/* K<0 (flat) : start close to zero */
k_min=ppr->k_min_tau0/pba->conformal_age;
}
else if (pba->sgnK == -1) {
/* K<0 (open) : start close to sqrt(-K)
(in transfer modules, for scalars, this will correspond to q close to zero;
for vectors and tensors, this value is even smaller than the minimum necessary value) */
k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2));
}
else if (pba->sgnK == 1) {
/* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */
k_min = sqrt((6.-1.e-4)*pba->K);
}
/** - --> find k_max (as well as k_max_cmb[ppt->index_md_tensors], k_max_cl[ppt->index_md_tensors]) */
k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */
k_max_cmb[ppt->index_md_tensors] = k_min;
k_max_cl[ppt->index_md_tensors] = k_min;
k_max = k_min;
if (ppt->has_cls == _TRUE_) {
/* find k_max_cmb[ppt->index_md_tensors]: */
/* choose a k_max_cmb[ppt->index_md_tensors] corresponding to a wavelength on the last
scattering surface seen today under an angle smaller than
pi/lmax: this is equivalent to
k_max_cl[ppt->index_md_tensors]*[comvoving.ang.diameter.distance] > l_max */
k_max_cmb[ppt->index_md_tensors] = ppr->k_max_tau0_over_l_max*ppt->l_tensor_max
/pba->conformal_age/pth->angular_rescaling;
k_max_cl[ppt->index_md_tensors] = k_max_cmb[ppt->index_md_tensors];
k_max = k_max_cmb[ppt->index_md_tensors];
}
/** - --> test that result for k_min, k_max make sense */
class_test(k_min<0.,
ppt->error_message,
"buggy definition of k_min");
class_test(k_max<0.,
ppt->error_message,
"buggy definition of k_max");
class_test(k_max<k_min,
ppt->error_message,
"buggy definition of k_min and/or k_max");
/* if K>0, the transfer function will be calculated for discrete
integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and
m=0,1,2 for scalars/vectors/tensors. However we are free to
define in the perturbation module some arbitrary values of k:
later on, the transfer module will interpolate at values of k
corresponding exactly to integer values of nu. Hence, apart
from the value of k_min and the step size in the vicinity of
k_min, we define exactly the same sampling in the three cases
K=0, K<0, K>0 */
/* allocate array with, for the moment, the largest possible size */
class_alloc(ppt->k[ppt->index_md_tensors],
((int)((k_max_cmb[ppt->index_md_tensors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1)
*sizeof(double),ppt->error_message);
/* first value */
index_k=0;
k = k_min;
ppt->k[ppt->index_md_tensors][index_k] = k;
index_k++;
/* values until k_max_cmb[ppt->index_md_tensors] */
while (k < k_max_cmb[ppt->index_md_tensors]) {
/* the linear step is not constant, it has a step-like shape,
centered around the characteristic scale set by the sound
horizon at recombination (associated to the comoving wavenumber
k_rec) */
step = (ppr->k_step_super
+ 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.)
* (ppr->k_step_sub-ppr->k_step_super)) * k_rec;
/* there is one other thing to take into account in the step
size. There are two other characteristic scales that matter for
the sampling: the Hubble scale today, k0=a0H0, and eventually
curvature scale sqrt(|K|). We define "scale2" as the sum of the
squared Hubble radius and squared curvature radius. We need to
increase the sampling for k<sqrt(scale2), in order to get the
first mutipoles accurate enough. The formula below reduces it
gradually in the k-->0 limit, by up to a factor 10. The actual
stepsize is still fixed by k_step_super, this is just a
reduction factor. */
scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K);
step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction);
class_test(step / k < ppr->smallest_allowed_variation,
ppt->error_message,
"k step =%e < machine precision : leads either to numerical error or infinite loop",
step * k_rec);
k += step;
class_test(k <= ppt->k[ppt->index_md_tensors][index_k-1],
ppt->error_message,
"consecutive values of k should differ and should be in growing order");
ppt->k[ppt->index_md_tensors][index_k] = k;
index_k++;
}
ppt->k_size_cmb[ppt->index_md_tensors] = index_k;
ppt->k_size_cl[ppt->index_md_tensors] = index_k;
ppt->k_size[ppt->index_md_tensors] = index_k;
class_realloc(ppt->k[ppt->index_md_tensors],
ppt->k[ppt->index_md_tensors],
ppt->k_size[ppt->index_md_tensors]*sizeof(double),
ppt->error_message);
}
/** - If user asked for k_output_values, add those to all k lists: */
if (ppt->k_output_values_num>0){
/* Allocate storage */
class_alloc(ppt->index_k_output_values,sizeof(double)*ppt->md_size*ppt->k_output_values_num,ppt->error_message);
/** - --> Find indices in ppt->k[index_md] corresponding to 'k_output_values'.
We are assuming that ppt->k is sorted and growing, and we have made sure
that ppt->k_output_values is also sorted and growing.*/
for (index_mode=0; index_mode<ppt->md_size; index_mode++){
newk_size = ppt->k_size[index_mode]+ppt->k_output_values_num;
class_alloc(tmp_k_list,sizeof(double)*newk_size,ppt->error_message);
index_k=0;
index_k_output=0;
for (index_newk=0; index_newk<newk_size; index_newk++){
/** - --> Decide if we should add k_output_value now. This has to be this complicated, since we
can only compare the k-values when both indices are in range.*/
if (index_k >= ppt->k_size[index_mode])
add_k_output_value = _TRUE_;
else if (index_k_output >= ppt->k_output_values_num)
add_k_output_value = _FALSE_;
else if (ppt->k_output_values[index_k_output] < ppt->k[index_mode][index_k])
add_k_output_value = _TRUE_;
else
add_k_output_value = _FALSE_;
if (add_k_output_value == _TRUE_){
tmp_k_list[index_newk] = ppt->k_output_values[index_k_output];
ppt->index_k_output_values[index_mode*ppt->k_output_values_num+index_k_output]=index_newk;
index_k_output++;
}
else{
tmp_k_list[index_newk] = ppt->k[index_mode][index_k];
index_k++;
}
}
free(ppt->k[index_mode]);
ppt->k[index_mode] = tmp_k_list;
ppt->k_size[index_mode] = newk_size;
index_k = newk_size-1;
while (ppt->k[index_mode][index_k] > k_max_cl[index_mode])
index_k--;
ppt->k_size_cl[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]);
index_k = newk_size-1;
while (ppt->k[index_mode][index_k] > k_max_cmb[index_mode])
index_k--;
ppt->k_size_cmb[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]);
/** - --> The two MIN statements are here because in a normal run, the cl and cmb
arrays contain a single k value larger than their respective k_max.
We are mimicking this behavior. */
}
}
/* For testing, can be useful to print the k list in a file:
FILE * out=fopen("output/k","w");
for (index_k=0; index_k < ppt->k_size[0]; index_k++) {
fprintf(out,"%e\n",ppt->k[0][index_k],pba->K);
}
fclose(out);
*/
/** - finally, find the global k_min and k_max for the ensemble of all modes (scalars, vectors, tensors) */
ppt->k_min = _HUGE_;
ppt->k_max = 0.;
if (ppt->has_scalars == _TRUE_) {
ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_scalars][0]); /* first value, inferred from perturbations structure */
ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_scalars][ppt->k_size[ppt->index_md_scalars]-1]); /* last value, inferred from perturbations structure */
}
if (ppt->has_vectors == _TRUE_) {
ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_vectors][0]); /* first value, inferred from perturbations structure */
ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_vectors][ppt->k_size[ppt->index_md_vectors]-1]); /* last value, inferred from perturbations structure */
}
if (ppt->has_tensors == _TRUE_) {
ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_tensors][0]); /* first value, inferred from perturbations structure */
ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_tensors][ppt->k_size[ppt->index_md_tensors]-1]); /* last value, inferred from perturbations structure */
}
free(k_max_cmb);
free(k_max_cl);
return _SUCCESS_;
}
/**
* Initialize a perturb_workspace structure. All fields are allocated
* here, with the exception of the perturb_vector '-->pv' field, which
* is allocated separately in perturb_vector_init. We allocate one
* such perturb_workspace structure per thread and per mode
* (scalar/../tensor). Then, for each thread, all initial conditions
* and wavenumbers will use the same workspace.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param ppw        Input/Output: pointer to perturb_workspace structure whose fields are allocated or filled here
* @return the error status
*/
int perturb_workspace_init(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
struct perturb_workspace * ppw
) {
/** Summary: */
/** - define local variables */
int index_mt=0;
int index_ap;
int l;
/** - Compute maximum l_max for any multipole */;
if (_scalars_) {
ppw->max_l_max = MAX(ppr->l_max_g, ppr->l_max_pol_g);
if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur);
if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm);
if (pba->has_dr == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_dr);
}
if (_tensors_) {
ppw->max_l_max = MAX(ppr->l_max_g_ten, ppr->l_max_pol_g_ten);
if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur);
if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm);
}
/** - Allocate \f$ s_l\f$[ ] array for freestreaming of multipoles (see arXiv:1305.3261) and initialize
to 1.0, which is the K=0 value. */
class_alloc(ppw->s_l, sizeof(double)*(ppw->max_l_max+1),ppt->error_message);
for (l=0; l<=ppw->max_l_max; l++){
ppw->s_l[l] = 1.0;
}
/** - define indices of metric perturbations obeying constraint
equations (this can be done once and for all, because the
vector of metric perturbations is the same whatever the
approximation scheme, unlike the vector of quantities to
be integrated, which is allocated separately in
perturb_vector_init) */
if (_scalars_) {
/* newtonian gauge */
if (ppt->gauge == newtonian) {
class_define_index(ppw->index_mt_psi,_TRUE_,index_mt,1); /* psi */
class_define_index(ppw->index_mt_phi_prime,_TRUE_,index_mt,1); /* phi' */
}
/* synchronous gauge (note that eta is counted in the vector of
quantities to be integrated, while here we only consider
quantities obeying to constraint equations) */
if (ppt->gauge == synchronous) {
class_define_index(ppw->index_mt_h_prime,_TRUE_,index_mt,1); /* h' */
class_define_index(ppw->index_mt_h_prime_prime,_TRUE_,index_mt,1); /* h'' */
class_define_index(ppw->index_mt_eta_prime,_TRUE_,index_mt,1); /* eta' */
class_define_index(ppw->index_mt_alpha,_TRUE_,index_mt,1); /* alpha = (h' + 6 tau') / (2 k**2) */
class_define_index(ppw->index_mt_alpha_prime,_TRUE_,index_mt,1); /* alpha' */
}
}
if (_vectors_) {
/* newtonian gauge */
if (ppt->gauge == newtonian) {
class_define_index(ppw->index_mt_V_prime,_TRUE_,index_mt,1);
}
if (ppt->gauge == synchronous) {
class_define_index(ppw->index_mt_hv_prime_prime,_TRUE_,index_mt,1);
}
}
if (_tensors_) {
class_define_index(ppw->index_mt_gw_prime_prime,_TRUE_,index_mt,1);
}
ppw->mt_size = index_mt;
/** - allocate some workspace in which we will store temporarily the
values of background, thermodynamics, metric and source
quantities at a given time */
class_alloc(ppw->pvecback,pba->bg_size_normal*sizeof(double),ppt->error_message);
class_alloc(ppw->pvecthermo,pth->th_size*sizeof(double),ppt->error_message);
class_alloc(ppw->pvecmetric,ppw->mt_size*sizeof(double),ppt->error_message);
/** - count number of approximations, initialize their indices, and allocate their flags */
index_ap=0;
class_define_index(ppw->index_ap_tca,_TRUE_,index_ap,1);
class_define_index(ppw->index_ap_rsa,_TRUE_,index_ap,1);
if (_scalars_) {
class_define_index(ppw->index_ap_ufa,pba->has_ur,index_ap,1);
class_define_index(ppw->index_ap_ncdmfa,pba->has_ncdm,index_ap,1);
}
ppw->ap_size=index_ap;
if (ppw->ap_size > 0)
class_alloc(ppw->approx,ppw->ap_size*sizeof(int),ppt->error_message);
/** - For definiteness, initialize approximation flags to arbitrary
values (correct values are overwritten in
pertub_find_approximation_switches) */
if (_scalars_) {
ppw->approx[ppw->index_ap_tca]=(int)tca_on;
ppw->approx[ppw->index_ap_rsa]=(int)rsa_off;
if (pba->has_ur == _TRUE_) {
ppw->approx[ppw->index_ap_ufa]=(int)ufa_off;
}
if (pba->has_ncdm == _TRUE_) {
ppw->approx[ppw->index_ap_ncdmfa]=(int)ncdmfa_off;
}
}
if (_tensors_) {
ppw->approx[ppw->index_ap_tca]=(int)tca_on;
ppw->approx[ppw->index_ap_rsa]=(int)rsa_off;
}
/** - allocate fields where some of the perturbations are stored */
if (_scalars_) {
if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {
class_alloc(ppw->delta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
class_alloc(ppw->theta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
class_alloc(ppw->shear_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
}
}
return _SUCCESS_;
}
/**
* Free the perturb_workspace structure (with the exception of the
* perturb_vector '-->pv' field, which is freed separately in
* perturb_vector_free).
*
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param ppw Input: pointer to perturb_workspace structure to be freed
* @return the error status
*/
int perturb_workspace_free (
                            struct perturbs * ppt,
                            int index_md,
                            struct perturb_workspace * ppw
                            ) {

  /* Release the temporary vectors of metric/thermo/background values
     and the free-streaming coefficient table s_l. */
  free(ppw->pvecmetric);
  free(ppw->pvecthermo);
  free(ppw->pvecback);
  free(ppw->s_l);

  /* The approximation flag array is only allocated when at least one
     approximation scheme was defined in perturb_workspace_init. */
  if (ppw->ap_size > 0) {
    free(ppw->approx);
  }

  /* Scalar modes may carry per-species ncdm perturbation storage. */
  if (_scalars_) {
    if ((ppt->has_density_transfers == _TRUE_) ||
        (ppt->has_velocity_transfers == _TRUE_) ||
        (ppt->has_source_delta_m == _TRUE_)) {
      free(ppw->shear_ncdm);
      free(ppw->theta_ncdm);
      free(ppw->delta_ncdm);
    }
  }

  /* Finally release the workspace structure itself (allocated by the
     caller; the pv field is freed separately in perturb_vector_free). */
  free(ppw);

  return _SUCCESS_;
}
/**
* Solve the perturbation evolution for a given mode, initial
* condition and wavenumber, and compute the corresponding source
* functions.
*
* For a given mode, initial condition and wavenumber, this function
* finds the time ranges over which the perturbations can be described
* within a given approximation. For each such range, it initializes
* (or redistributes) perturbations using perturb_vector_init(), and
* integrates over time. Whenever a "source sampling time" is passed,
* the source terms are computed and stored in the source table using
* perturb_sources().
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input/Output: pointer to the perturbation structure (output source functions S(k,tau) written here)
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param index_ic Input: index of initial condition under consideration (ad, iso...)
* @param index_k Input: index of wavenumber
* @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces
* @return the error status
*/
int perturb_solve(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
int index_ic,
int index_k,
struct perturb_workspace * ppw
) {
/** Summary: */
/** - define local variables */
/* contains all fixed parameters, indices and workspaces used by the perturb_derivs function */
struct perturb_parameters_and_workspace ppaw;
/* conformal time */
double tau,tau_lower,tau_upper,tau_mid;
/* multipole */
int l;
/* index running over time */
int index_tau;
/* number of values in the tau_sampling array that should be considered for a given mode */
int tau_actual_size;
/* running index over types (temperature, etc) */
int index_type;
/* Fourier mode */
double k;
/* number of time intervals where the approximation scheme is uniform */
int interval_number;
/* index running over such time intervals */
int index_interval;
/* number of time intervals where each particular approximation is uniform */
int * interval_number_of;
/* edge of intervals where approximation scheme is uniform: tau_ini, tau_switch_1, ..., tau_end */
double * interval_limit;
/* array of approximation scheme within each interval: interval_approx[index_interval][index_ap] */
int ** interval_approx;
/* index running over approximations */
int index_ap;
/* approximation scheme within previous interval: previous_approx[index_ap] */
int * previous_approx;
int n_ncdm,is_early_enough;
/* function pointer to ODE evolver and names of possible evolvers */
extern int evolver_rk();
extern int evolver_ndf15();
int (*generic_evolver)();
/* Related to the perturbation output */
int (*perhaps_print_variables)();
int index_ikout;
/** - initialize indices relevant for back/thermo tables search */
ppw->last_index_back=0;
ppw->last_index_thermo=0;
ppw->inter_mode = pba->inter_normal;
/** - get wavenumber value */
k = ppt->k[index_md][index_k];
class_test(k == 0.,
ppt->error_message,
"stop to avoid division by zero");
/** - If non-zero curvature, update array of free-streaming coefficients ppw->s_l */
if (pba->has_curvature == _TRUE_){
for (l = 0; l<=ppw->max_l_max; l++){
/* s_l = sqrt(1 - K (l^2 - 1)/k^2), clipped at zero (see arXiv:1305.3261) */
ppw->s_l[l] = sqrt(MAX(1.0-pba->K*(l*l-1.0)/k/k,0.));
}
}
/** - maximum value of tau for which sources are calculated for this wavenumber */
/* by default, today */
tau_actual_size = ppt->tau_size;
/** - using bisection, compute minimum value of tau for which this
wavenumber is integrated */
/* will be at least the first time in the background table */
tau_lower = pba->tau_table[0];
class_call(background_at_tau(pba,
tau_lower,
pba->normal_info,
pba->inter_normal,
&(ppw->last_index_back),
ppw->pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./ppw->pvecback[pba->index_bg_a]-1.,
pth->inter_normal,
&(ppw->last_index_thermo),
ppw->pvecback,
ppw->pvecthermo),
pth->error_message,
ppt->error_message);
/* check that this initial time is indeed OK given imposed
conditions on kappa' and on k/aH */
class_test(ppw->pvecback[pba->index_bg_a]*
ppw->pvecback[pba->index_bg_H]/
ppw->pvecthermo[pth->index_th_dkappa] >
ppr->start_small_k_at_tau_c_over_tau_h, ppt->error_message, "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. You should increase 'start_small_k_at_tau_c_over_tau_h' up to at least %g, or decrease 'a_ini_over_a_today_default'\n",
ppw->pvecback[pba->index_bg_a]*
ppw->pvecback[pba->index_bg_H]/
ppw->pvecthermo[pth->index_th_dkappa]);
class_test(k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] >
ppr->start_large_k_at_tau_h_over_tau_k,
ppt->error_message,
"your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. You should increase 'start_large_k_at_tau_h_over_tau_k' up to at least %g, or decrease 'a_ini_over_a_today_default'\n",
ppt->k[index_md][ppt->k_size[index_md]-1]/ppw->pvecback[pba->index_bg_a]/ ppw->pvecback[pba->index_bg_H]);
/* if there are non-cold relics, check that they are still
ultra-relativistic (w close to 1/3) at the earliest background time */
if (pba->has_ncdm == _TRUE_) {
for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
class_test(fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.)>ppr->tol_ncdm_initial_w,
ppt->error_message,
"your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time at which the ncdm species number %d is not ultra-relativistic anymore, with w=%g, p=%g and rho=%g\n",
n_ncdm,
ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm],
ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm],
ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]);
}
}
/* is at most the time at which sources must be sampled */
tau_upper = ppt->tau_sampling[0];
/* start bisection */
tau_mid = 0.5*(tau_lower + tau_upper);
/* bisect until the relative width of the bracket [tau_lower,tau_upper]
falls below tol_tau_approx; "early enough" means all three starting
conditions (ncdm w, aH/kappa', k/aH) are satisfied at tau_mid */
while ((tau_upper - tau_lower)/tau_lower > ppr->tol_tau_approx) {
is_early_enough = _TRUE_;
class_call(background_at_tau(pba,
tau_mid,
pba->normal_info,
pba->inter_normal,
&(ppw->last_index_back),
ppw->pvecback),
pba->error_message,
ppt->error_message);
/* if there are non-cold relics, check that they are relativistic enough */
if (pba->has_ncdm == _TRUE_) {
for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
if (fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.) > ppr->tol_ncdm_initial_w)
is_early_enough = _FALSE_;
}
}
/* also check that the two conditions on (aH/kappa') and (aH/k) are fulfilled */
if (is_early_enough == _TRUE_) {
class_call(thermodynamics_at_z(pba,
pth,
1./ppw->pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&(ppw->last_index_thermo),
ppw->pvecback,
ppw->pvecthermo),
pth->error_message,
ppt->error_message);
if ((ppw->pvecback[pba->index_bg_a]*
ppw->pvecback[pba->index_bg_H]/
ppw->pvecthermo[pth->index_th_dkappa] >
ppr->start_small_k_at_tau_c_over_tau_h) ||
(k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] >
ppr->start_large_k_at_tau_h_over_tau_k))
is_early_enough = _FALSE_;
}
if (is_early_enough == _TRUE_)
tau_lower = tau_mid;
else
tau_upper = tau_mid;
tau_mid = 0.5*(tau_lower + tau_upper);
}
/* tau is the starting (conformal) time of the integration for this k */
tau = tau_mid;
/** - find the number of intervals over which approximation scheme is constant */
class_alloc(interval_number_of,ppw->ap_size*sizeof(int),ppt->error_message);
ppw->inter_mode = pba->inter_normal;
class_call(perturb_find_approximation_number(ppr,
pba,
pth,
ppt,
index_md,
k,
ppw,
tau,
ppt->tau_sampling[tau_actual_size-1],
&interval_number,
interval_number_of),
ppt->error_message,
ppt->error_message);
class_alloc(interval_limit,(interval_number+1)*sizeof(double),ppt->error_message);
class_alloc(interval_approx,interval_number*sizeof(int*),ppt->error_message);
for (index_interval=0; index_interval<interval_number; index_interval++)
class_alloc(interval_approx[index_interval],ppw->ap_size*sizeof(int),ppt->error_message);
class_call(perturb_find_approximation_switches(ppr,
pba,
pth,
ppt,
index_md,
k,
ppw,
tau,
ppt->tau_sampling[tau_actual_size-1],
ppr->tol_tau_approx,
interval_number,
interval_number_of,
interval_limit,
interval_approx),
ppt->error_message,
ppt->error_message);
free(interval_number_of);
/** - fill the structure containing all fixed parameters, indices
and workspaces needed by perturb_derivs */
ppaw.ppr = ppr;
ppaw.pba = pba;
ppaw.pth = pth;
ppaw.ppt = ppt;
ppaw.index_md = index_md;
ppaw.index_ic = index_ic;
ppaw.index_k = index_k;
ppaw.k = k;
ppaw.ppw = ppw;
/* during the time integration, consecutive lookups are close to each
other, so switch the interpolation mode to "closeby" and reset caches */
ppaw.ppw->inter_mode = pba->inter_closeby;
ppaw.ppw->last_index_back = 0;
ppaw.ppw->last_index_thermo = 0;
/** - check whether we need to print perturbations to a file for this wavenumber */
perhaps_print_variables = NULL;
ppw->index_ikout = -1;
for (index_ikout=0; index_ikout<ppt->k_output_values_num; index_ikout++){
if (ppt->index_k_output_values[index_md*ppt->k_output_values_num+index_ikout] == index_k){
ppw->index_ikout = index_ikout;
perhaps_print_variables = perturb_print_variables;
/* class_call(perturb_prepare_output_file(
pba,ppt,ppw,index_ikout,index_md),
ppt->error_message,
ppt->error_message);
*/
}
}
/** - loop over intervals over which approximation scheme is uniform. For each interval: */
for (index_interval=0; index_interval<interval_number; index_interval++) {
/** - --> (a) fix the approximation scheme */
for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
ppw->approx[index_ap]=interval_approx[index_interval][index_ap];
/** - --> (b) get the previous approximation scheme. If the current
interval starts from the initial time tau_ini, the previous
approximation is set to be a NULL pointer, so that the
function perturb_vector_init() knows that perturbations must
be initialized */
if (index_interval==0) {
previous_approx=NULL;
}
else {
previous_approx=interval_approx[index_interval-1];
}
/** - --> (c) define the vector of perturbations to be integrated
over. If the current interval starts from the initial time
tau_ini, fill the vector with initial conditions for each
mode. If it starts from an approximation switching point,
redistribute correctly the perturbations from the previous to
the new vector of perturbations. */
class_call(perturb_vector_init(ppr,
pba,
pth,
ppt,
index_md,
index_ic,
k,
interval_limit[index_interval],
ppw,
previous_approx),
ppt->error_message,
ppt->error_message);
/** - --> (d) integrate the perturbations over the current interval. */
/* select the ODE integrator according to the precision settings:
explicit Runge-Kutta or the stiff ndf15 solver */
if(ppr->evolver == rk){
generic_evolver = evolver_rk;
}
else{
generic_evolver = evolver_ndf15;
}
class_call(generic_evolver(perturb_derivs,
interval_limit[index_interval],
interval_limit[index_interval+1],
ppw->pv->y,
ppw->pv->used_in_sources,
ppw->pv->pt_size,
&ppaw,
ppr->tol_perturb_integration,
ppr->smallest_allowed_variation,
perturb_timescale,
ppr->perturb_integration_stepsize,
ppt->tau_sampling,
tau_actual_size,
perturb_sources,
perhaps_print_variables,
ppt->error_message),
ppt->error_message,
ppt->error_message);
}
/** - if perturbations were printed in a file, close the file */
//if (perhaps_print_variables != NULL)
// fclose(ppw->perturb_output_file);
/** - fill the source terms array with zeros for all times between
the last integrated time tau_max and tau_today. */
for (index_tau = tau_actual_size; index_tau < ppt->tau_size; index_tau++) {
for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) {
ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + index_type]
[index_tau * ppt->k_size[index_md] + index_k] = 0.;
}
}
/** - free quantities allocated at the beginning of the routine */
class_call(perturb_vector_free(ppw->pv),
ppt->error_message,
ppt->error_message);
for (index_interval=0; index_interval<interval_number; index_interval++)
free(interval_approx[index_interval]);
free(interval_approx);
free(interval_limit);
return _SUCCESS_;
}
/**
 * Build the column-title strings for the scalar/vector/tensor
 * perturbation output (used when 'k_output_values' were requested).
 *
 * @param pba Input: pointer to background structure
 * @param ppt Input/Output: pointer to the perturbation structure (title strings written here)
 * @param ppr Input: pointer to precision structure (used for l_max_ncdm in the CONCEPT multipole output)
 * @return the error status
 */
int perturb_prepare_output(struct background * pba,
                           struct perturbs * ppt,
                           struct precision * ppr){

  int n_ncdm;

  /************************/
  /* For use with CONCEPT */
  /************************/
  /* scratch buffer for generated column titles; all writes below are
     bounded with snprintf (the original code used unbounded sprintf) */
  char tmp[1024];
  int index_q;
  int index_l;
  /**************************/
  /* ^For use with CONCEPT^ */
  /**************************/

  ppt->scalar_titles[0]='\0';
  ppt->vector_titles[0]='\0';
  ppt->tensor_titles[0]='\0';

  if (ppt->k_output_values_num > 0) {

    /** Write titles for all perturbations that we would like to print/store. */
    if (ppt->has_scalars == _TRUE_){

      class_store_columntitle(ppt->scalar_titles,"tau [Mpc]",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"a",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"delta_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"theta_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"shear_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"pol0_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"pol1_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"pol2_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"delta_b",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"theta_b",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"psi",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"phi",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"phi_prime",_TRUE_); //CGT

      /* Perturbed recombination */
      class_store_columntitle(ppt->scalar_titles,"delta_Tb",ppt->has_perturbed_recombination);
      class_store_columntitle(ppt->scalar_titles,"delta_chi",ppt->has_perturbed_recombination);
      /* Ultrarelativistic species */
      class_store_columntitle(ppt->scalar_titles,"delta_ur",pba->has_ur);
      class_store_columntitle(ppt->scalar_titles,"theta_ur",pba->has_ur);
      class_store_columntitle(ppt->scalar_titles,"shear_ur",pba->has_ur);
      /* Cold dark matter */
      class_store_columntitle(ppt->scalar_titles,"delta_cdm",pba->has_cdm);
      class_store_columntitle(ppt->scalar_titles,"theta_cdm",pba->has_cdm);
      /* Non-cold dark matter */
      if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) {
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          snprintf(tmp,sizeof tmp,"delta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof tmp,"theta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof tmp,"shear_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof tmp,"cs2_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          /************************/
          /* For use with CONCEPT */
          /************************/
          /* Include ncdm Theta_n_q_l_ncdm[n,q,l] in perturbation output */
          snprintf(tmp,sizeof tmp,"M_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
            snprintf(tmp,sizeof tmp,"dlnf0_dlnq_ncdm[%d,%d]",n_ncdm,index_q);
            class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          }
          for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
            snprintf(tmp,sizeof tmp,"q_ncdm[%d,%d]",n_ncdm,index_q);
            class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          }
          for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
            for (index_l=0; index_l<=ppr->l_max_ncdm; index_l++) {
              /* snprintf(tmp,sizeof tmp,"Theta_ncdm[%d](%.16f,%d)",n_ncdm,pba->q_ncdm[n_ncdm][index_q],index_l); */
              snprintf(tmp,sizeof tmp,"Theta_n_q_l_ncdm[%d,%d,%d]",n_ncdm,index_q,index_l);
              class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
            }
          }
          /**************************/
          /* ^For use with CONCEPT^ */
          /**************************/
        }
      }
      /* Decaying cold dark matter */
      class_store_columntitle(ppt->scalar_titles, "delta_dcdm", pba->has_dcdm);
      class_store_columntitle(ppt->scalar_titles, "theta_dcdm", pba->has_dcdm);
      /* Decay radiation */
      class_store_columntitle(ppt->scalar_titles, "delta_dr", pba->has_dr);
      class_store_columntitle(ppt->scalar_titles, "theta_dr", pba->has_dr);
      class_store_columntitle(ppt->scalar_titles, "shear_dr", pba->has_dr);
      /* Scalar field scf */
      class_store_columntitle(ppt->scalar_titles, "delta_scf", pba->has_scf);
      class_store_columntitle(ppt->scalar_titles, "theta_scf", pba->has_scf);
      /************************/
      /* For use with CONCEPT */
      /************************/
      /* Include fld in perturbation output */
      class_store_columntitle(ppt->scalar_titles, "delta_fld", pba->has_fld);
      class_store_columntitle(ppt->scalar_titles, "theta_fld", pba->has_fld);
      /**
       * We choose to store cs2_fld = delta_p_fld/delta_rho_fld rather than
       * simply delta_p_fld itself, as is done for massive neutrinos.
       */
      class_store_columntitle(ppt->scalar_titles, "cs2_fld", pba->has_fld);
      /**************************/
      /* ^For use with CONCEPT^ */
      /**************************/
      /************************/
      /* For use with CONCEPT */
      /************************/
      /* Include theta_tot in perturbation output */
      class_store_columntitle(ppt->scalar_titles, "theta_tot", _TRUE_);
      /**************************/
      /* ^For use with CONCEPT^ */
      /**************************/
      /************************/
      /* For use with CONCEPT */
      /************************/
      /* Include h_prime in perturbation output */
      class_store_columntitle(ppt->scalar_titles, "h_prime", ppt->gauge == synchronous);
      /**************************/
      /* ^For use with CONCEPT^ */
      /**************************/
      /************************/
      /* For use with CONCEPT */
      /************************/
      /* Include H_T_prime (in N-body gauge) in perturbation output */
      class_store_columntitle(ppt->scalar_titles, "H_T_prime", _TRUE_);
      /**************************/
      /* ^For use with CONCEPT^ */
      /**************************/

      ppt->number_of_scalar_titles =
        get_number_of_titles(ppt->scalar_titles);
    }

    if (ppt->has_tensors == _TRUE_){

      class_store_columntitle(ppt->tensor_titles,"tau [Mpc]",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"a",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"delta_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"shear_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"l4_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"pol0_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"pol2_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"pol4_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"H (gw)",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"Hdot (gwdot)",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"delta_ur",ppt->evolve_tensor_ur);
      class_store_columntitle(ppt->tensor_titles,"shear_ur",ppt->evolve_tensor_ur);
      class_store_columntitle(ppt->tensor_titles,"l4_ur",ppt->evolve_tensor_ur);

      if (ppt->evolve_tensor_ncdm == _TRUE_) {
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          snprintf(tmp,sizeof tmp,"delta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof tmp,"theta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof tmp,"shear_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
        }
      }

      ppt->number_of_tensor_titles =
        get_number_of_titles(ppt->tensor_titles);
    }

  }
  return _SUCCESS_;
}
/**
* For a given mode and wavenumber, find the number of intervals of
* time between tau_ini and tau_end such that the approximation
* scheme (and the number of perturbation equations) is uniform.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k                  Input: wavenumber value (not an index)
* @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces
* @param tau_ini Input: initial time of the perturbation integration
* @param tau_end Input: final time of the perturbation integration
* @param interval_number Output: total number of intervals
* @param interval_number_of Output: number of intervals with respect to each particular approximation
* @return the error status
*/
int perturb_find_approximation_number(
                                      struct precision * ppr,
                                      struct background * pba,
                                      struct thermo * pth,
                                      struct perturbs * ppt,
                                      int index_md,
                                      double k,
                                      struct perturb_workspace * ppw,
                                      double tau_ini,
                                      double tau_end,
                                      int * interval_number,
                                      int * interval_number_of /* interval_number_of[index_ap] (already allocated) */
                                      ){

  /** Summary: */

  /* index running over approximations */
  int index_ap;

  /* approximation flags evaluated at tau_ini, saved before the flags
     stored in ppw->approx are overwritten by the tau_end evaluation */
  int * flags_ini;

  /** - fix default number of intervals to one (if no approximation switch) */

  *interval_number=1;

  /** - evaluate all approximation flags once at tau_ini and once at
      tau_end. A single call to perturb_approximations() fills the
      whole ppw->approx[] array, so there is no need to re-evaluate it
      for every approximation index (the original code called it twice
      per approximation, i.e. 2*ap_size times instead of 2). */

  class_alloc(flags_ini,ppw->ap_size*sizeof(int),ppt->error_message);

  class_call(perturb_approximations(ppr,
                                    pba,
                                    pth,
                                    ppt,
                                    index_md,
                                    k,
                                    tau_ini,
                                    ppw),
             ppt->error_message,
             ppt->error_message);

  for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
    flags_ini[index_ap] = ppw->approx[index_ap];

  class_call(perturb_approximations(ppr,
                                    pba,
                                    pth,
                                    ppt,
                                    index_md,
                                    k,
                                    tau_end,
                                    ppw),
             ppt->error_message,
             ppt->error_message);

  /** - loop over each approximation and add the number of approximation switching times */

  for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {

    class_test(ppw->approx[index_ap]<flags_ini[index_ap],
               ppt->error_message,
               "For each approximation scheme, the declaration of approximation labels in the enumeration must follow chronological order, e.g: enum approx_flags {flag1, flag2, flag3} with flag1 being the initial one and flag3 the final one");

    *interval_number += ppw->approx[index_ap]-flags_ini[index_ap];

    interval_number_of[index_ap] = ppw->approx[index_ap]-flags_ini[index_ap]+1;

  }

  free(flags_ini);

  return _SUCCESS_;

}
/**
* For a given mode and wavenumber, find the values of time at which
* the approximation changes.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k Input: wavenumber (value, not index)
* @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces
* @param tau_ini Input: initial time of the perturbation integration
* @param tau_end Input: final time of the perturbation integration
* @param precision Input: tolerance on output values
* @param interval_number Input: total number of intervals
* @param interval_number_of Input: number of intervals with respect to each particular approximation
* @param interval_limit Output: value of time at the boundary of the intervals: tau_ini, tau_switch1, ..., tau_end
* @param interval_approx Output: value of approximations in each interval
* @return the error status
*/
int perturb_find_approximation_switches(
                                        struct precision * ppr,
                                        struct background * pba,
                                        struct thermo * pth,
                                        struct perturbs * ppt,
                                        int index_md,
                                        double k,
                                        struct perturb_workspace * ppw,
                                        double tau_ini,
                                        double tau_end,
                                        double precision,
                                        int interval_number,
                                        int * interval_number_of,
                                        double * interval_limit, /* interval_limit[index_interval] (already allocated) */
                                        int ** interval_approx /* interval_approx[index_interval][index_ap] (already allocated) */
                                        ){

  /** Summary: */

  /* index running over approximation schemes */
  int index_ap;
  /* index running over the switching times of one given approximation */
  int index_switch;
  /* index running over all switching times, all approximations mixed together */
  int index_switch_tot;
  /* number of switches of the approximation currently considered */
  int num_switch;
  /* lower edge of the remaining search range, and current bisection bracket */
  double tau_min,lower_bound,upper_bound;
  /* midpoint of the bisection bracket (converges to the switching time) */
  double mid=0;
  /* switching times, stored in arbitrary order before being sorted */
  double * unsorted_tau_switch;
  /* smallest not-yet-sorted switching time (used by the selection sort below) */
  double next_tau_switch;
  /* value of the approximation flag at tau_ini */
  int flag_ini;
  /* number of approximations changing simultaneously at one boundary */
  int num_switching_at_given_time;

  /** - write in output arrays the initial time and approximation */

  interval_limit[0]=tau_ini;

  /* evaluate all approximation flags at tau_ini; they define the scheme
     of the first interval */
  class_call(perturb_approximations(ppr,
                                    pba,
                                    pth,
                                    ppt,
                                    index_md,
                                    k,
                                    tau_ini,
                                    ppw),
             ppt->error_message,
             ppt->error_message);

  for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
    interval_approx[0][index_ap]=ppw->approx[index_ap];

  /** - if there are no approximation switches, just write final time and return */

  if (interval_number == 1) {

    interval_limit[1]=tau_end;

  }

  /** - if there are switches, consider approximations one after each
      other. Find switching time by bisection. Store all switches in
      arbitrary order in array unsorted_tau_switch[ ] */

  else {

    class_alloc(unsorted_tau_switch,(interval_number-1)*sizeof(double),ppt->error_message);
    index_switch_tot=0;

    for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {

      if (interval_number_of[index_ap] > 1) {

        num_switch = interval_number_of[index_ap]-1;
        /* successive switches of one approximation are ordered in time, so
           each bisection can start from the previously found switch */
        tau_min = tau_ini;
        flag_ini = interval_approx[0][index_ap];

        for (index_switch=0; index_switch<num_switch; index_switch++) {

          lower_bound=tau_min;
          upper_bound=tau_end;
          mid = 0.5*(lower_bound+upper_bound);

          /* bisection: the flag is non-decreasing in time, so the time at
             which it first exceeds flag_ini+index_switch is bracketed and
             the bracket is shrunk until narrower than 'precision' */
          while (upper_bound - lower_bound > precision) {

            class_call(perturb_approximations(ppr,
                                              pba,
                                              pth,
                                              ppt,
                                              index_md,
                                              k,
                                              mid,
                                              ppw),
                       ppt->error_message,
                       ppt->error_message);

            if (ppw->approx[index_ap] > flag_ini+index_switch) {
              upper_bound=mid;
            }
            else {
              lower_bound=mid;
            }

            mid = 0.5*(lower_bound+upper_bound);

          }

          unsorted_tau_switch[index_switch_tot]=mid;
          index_switch_tot++;

          /* the next switch of this approximation must come later */
          tau_min=mid;

        }
      }
    }

    /* consistency check: the bisections must have found exactly
       interval_number-1 switching times in total */
    class_test(index_switch_tot != (interval_number-1),
               ppt->error_message,
               "bug in approximation switch search routine: should have %d = %d",
               index_switch_tot,interval_number-1);

    /** - now sort interval limits in correct order */

    /* selection sort: repeatedly pick the smallest switching time strictly
       larger than the last sorted boundary */
    index_switch_tot=1;

    while (index_switch_tot < interval_number) {

      next_tau_switch=tau_end;
      for (index_switch=0; index_switch<interval_number-1; index_switch++) {
        if ((unsorted_tau_switch[index_switch] > interval_limit[index_switch_tot-1]) &&
            (unsorted_tau_switch[index_switch] < next_tau_switch)) {
          next_tau_switch=unsorted_tau_switch[index_switch];
        }
      }
      interval_limit[index_switch_tot]=next_tau_switch;
      index_switch_tot++;
    }

    interval_limit[index_switch_tot]=tau_end;

    /* two equal switching times would have been skipped by the strict
       inequalities above, leaving fewer sorted values than expected */
    class_test(index_switch_tot != interval_number,
               ppt->error_message,
               "most probably two approximation switching time were found to be equal, which cannot be handled\n");

    /** - store each approximation in chronological order */

    for (index_switch=1; index_switch<interval_number; index_switch++) {

      /* evaluate the flags at the midpoint of each interval, where the
         scheme is uniform by construction */
      class_call(perturb_approximations(ppr,
                                        pba,
                                        pth,
                                        ppt,
                                        index_md,
                                        k,
                                        0.5*(interval_limit[index_switch]+interval_limit[index_switch+1]),
                                        ppw),
                 ppt->error_message,
                 ppt->error_message);

      for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {
        interval_approx[index_switch][index_ap]=ppw->approx[index_ap];

        /* check here that approximation does not go backward (remember
           that by definition the value of an approximation can only
           increase) */
        class_test(interval_approx[index_switch][index_ap] < interval_approx[index_switch-1][index_ap],
                   ppt->error_message,
                   "The approximation with label %d is not defined correctly: it goes backward (from %d to %d) for k=%e and between tau=%e and %e; this cannot be handled\n",
                   index_ap,
                   interval_approx[index_switch-1][index_ap],
                   interval_approx[index_switch][index_ap],
                   k,
                   0.5*(interval_limit[index_switch-1]+interval_limit[index_switch]),
                   0.5*(interval_limit[index_switch]+interval_limit[index_switch+1])
                   );
      }

      /* check here that more than one approximation is not switched on at a given time */
      num_switching_at_given_time=0;
      for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {
        if (interval_approx[index_switch][index_ap] != interval_approx[index_switch-1][index_ap])
          num_switching_at_given_time++;
      }
      class_test(num_switching_at_given_time != 1,
                 ppt->error_message,
                 "for k=%e, at tau=%g, you switch %d approximations at the same time, this cannot be handled. Usually happens in two cases: triggers for different approximations coincide, or one approx is reversible\n",
                 k,
                 interval_limit[index_switch],
                 num_switching_at_given_time);

      /* optional diagnostics: report which approximation switches at each boundary */
      if (ppt->perturbations_verbose>2) {

        if (_scalars_) {

          if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) &&
              (interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off))
            fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]);
          //fprintf(stderr,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]); //TBC

          if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) &&
              (interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on))
            fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation at tau=%e\n",k,interval_limit[index_switch]);

          if (pba->has_ur == _TRUE_) {
            if ((interval_approx[index_switch-1][ppw->index_ap_ufa]==(int)ufa_off) &&
                (interval_approx[index_switch][ppw->index_ap_ufa]==(int)ufa_on)) {
              fprintf(stdout,"Mode k=%e: will switch on ur fluid approximation at tau=%e\n",k,interval_limit[index_switch]);
            }
          }
          if (pba->has_ncdm == _TRUE_) {
            if ((interval_approx[index_switch-1][ppw->index_ap_ncdmfa]==(int)ncdmfa_off) &&
                (interval_approx[index_switch][ppw->index_ap_ncdmfa]==(int)ncdmfa_on)) {
              fprintf(stdout,"Mode k=%e: will switch on ncdm fluid approximation at tau=%e\n",k,interval_limit[index_switch]);
            }
          }
        }

        if (_tensors_) {

          if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) &&
              (interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off))
            fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation for tensors at tau=%e\n",k,interval_limit[index_switch]);

          if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) &&
              (interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on))
            fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation for tensors at tau=%e\n",k,interval_limit[index_switch]);
        }
      }
    }

    free(unsorted_tau_switch);

    /* leave the workspace ppw consistent with the final time tau_end */
    class_call(perturb_approximations(ppr,
                                      pba,
                                      pth,
                                      ppt,
                                      index_md,
                                      k,
                                      tau_end,
                                      ppw),
               ppt->error_message,
               ppt->error_message);

  }

  return _SUCCESS_;

}
/**
* Initialize the field '-->pv' of a perturb_workspace structure, which
* is a perturb_vector structure. This structure contains indices and
* values of all quantities which need to be integrated with respect
* to time (and only them: quantities fixed analytically or obeying
* constraint equations are NOT included in this vector). This routine
* distinguishes between two cases:
*
* --> the input pa_old is set to the NULL pointer:
*
* This happens when we start integrating over a new wavenumber and we
* want to set initial conditions for the perturbations. Then, it is
* assumed that ppw-->pv is not yet allocated. This routine allocates
* it, defines all indices, and then fills the vector ppw-->pv-->y with
* the initial conditions defined in perturb_initial_conditions.
*
* --> the input pa_old is not set to the NULL pointer and describes
* some set of approximations:
*
* This happens when we need to change approximation scheme while
* integrating over a given wavenumber. The new approximation
* described by ppw-->pa is then different from pa_old. Then, this
* routine allocates a new vector with a new size and new index
* values; it fills this vector with initial conditions taken from the
* previous vector passed as an input in ppw-->pv, and eventually with
* some analytic approximations for the new variables appearing at
* this time; then the new vector comes in replacement of the old one,
* which is freed.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param index_ic Input: index of initial condition under consideration (ad, iso...)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param ppw Input/Output: workspace containing in input the approximation scheme, the background/thermodynamics/metric quantities, and eventually the previous vector y; and in output the new vector y.
* @param pa_old Input: NULL if we need to set y to initial conditions for a new wavenumber; points towards a perturb_approximations if we want to switch off an approximation.
* @return the error status
*/
int perturb_vector_init(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
int index_ic,
double k,
double tau,
struct perturb_workspace * ppw, /* ppw->pv unallocated if pa_old = NULL, allocated and filled otherwise */
int * pa_old
) {
/** Summary: */
/** - define local variables */
struct perturb_vector * ppv;
int index_pt;
int l;
int n_ncdm,index_q,ncdm_l_size;
double rho_plus_p_ncdm,q,q2,epsilon,a,factor;
/** - allocate a new perturb_vector structure to which ppw-->pv will point at the end of the routine */
class_alloc(ppv,sizeof(struct perturb_vector),ppt->error_message);
/** - initialize pointers to NULL (they will be allocated later if
needed), relevant for perturb_vector_free() */
ppv->l_max_ncdm = NULL;
ppv->q_size_ncdm = NULL;
/** - define all indices in this new vector (depends on approximation scheme, described by the input structure ppw-->pa) */
index_pt = 0;
if (_scalars_) {
/* reject inconsistent values of the number of mutipoles in photon temperature hierarchy */
class_test(ppr->l_max_g < 4,
ppt->error_message,
"ppr->l_max_g should be at least 4, i.e. we must integrate at least over photon density, velocity, shear, third and fourth momentum");
/* reject inconsistent values of the number of mutipoles in photon polarization hierarchy */
class_test(ppr->l_max_pol_g < 4,
ppt->error_message,
"ppr->l_max_pol_g should be at least 4");
/* reject inconsistent values of the number of mutipoles in decay radiation hierarchy */
if (pba->has_dr == _TRUE_) {
class_test(ppr->l_max_dr < 4,
ppt->error_message,
"ppr->l_max_dr should be at least 4, i.e. we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum");
}
/* reject inconsistent values of the number of mutipoles in ultra relativistic neutrino hierarchy */
if (pba->has_ur == _TRUE_) {
class_test(ppr->l_max_ur < 4,
ppt->error_message,
"ppr->l_max_ur should be at least 4, i.e. we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum");
}
/* photons */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
/* temperature */
ppv->l_max_g = ppr->l_max_g;
class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */
class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */
class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* higher momenta */
/* polarization */
ppv->l_max_pol_g = ppr->l_max_pol_g;
class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2);
}
}
/* baryons */
class_define_index(ppv->index_pt_delta_b,_TRUE_,index_pt,1); /* baryon density */
class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1); /* baryon velocity */
/* cdm */
class_define_index(ppv->index_pt_delta_cdm,pba->has_cdm,index_pt,1); /* cdm density */
class_define_index(ppv->index_pt_theta_cdm,pba->has_cdm && (ppt->gauge == newtonian),index_pt,1); /* cdm velocity */
/* dcdm */
class_define_index(ppv->index_pt_delta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm density */
class_define_index(ppv->index_pt_theta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm velocity */
/* ultra relativistic decay radiation */
if (pba->has_dr==_TRUE_){
ppv->l_max_dr = ppr->l_max_dr;
class_define_index(ppv->index_pt_F0_dr,_TRUE_,index_pt,ppv->l_max_dr+1); /* all momenta in Boltzmann hierarchy */
}
/* fluid */
if (pba->use_ppf == _FALSE_) {
class_define_index(ppv->index_pt_delta_fld,pba->has_fld,index_pt,1); /* fluid density */
class_define_index(ppv->index_pt_theta_fld,pba->has_fld,index_pt,1); /* fluid velocity */
}
else {
class_define_index(ppv->index_pt_Gamma_fld,pba->has_fld,index_pt,1); /* Gamma variable of PPF scheme */
}
/* scalar field */
class_define_index(ppv->index_pt_phi_scf,pba->has_scf,index_pt,1); /* scalar field density */
class_define_index(ppv->index_pt_phi_prime_scf,pba->has_scf,index_pt,1); /* scalar field velocity */
/* perturbed recombination: the indices are defined once tca is off. */
if ( (ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){
class_define_index(ppv->index_pt_perturbed_recombination_delta_temp,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_perturbed_recombination_delta_chi,_TRUE_,index_pt,1);
}
/* ultra relativistic neutrinos */
if (pba->has_ur && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
class_define_index(ppv->index_pt_delta_ur,_TRUE_,index_pt,1); /* density of ultra-relativistic neutrinos/relics */
class_define_index(ppv->index_pt_theta_ur,_TRUE_,index_pt,1); /* velocity of ultra-relativistic neutrinos/relics */
class_define_index(ppv->index_pt_shear_ur,_TRUE_,index_pt,1); /* shear of ultra-relativistic neutrinos/relics */
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->l_max_ur = ppr->l_max_ur;
class_define_index(ppv->index_pt_l3_ur,_TRUE_,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */
}
}
/* non-cold dark matter */
if (pba->has_ncdm == _TRUE_) {
ppv->index_pt_psi0_ncdm1 = index_pt; /* density of ultra-relativistic neutrinos/relics */
ppv->N_ncdm = pba->N_ncdm;
class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
// Set value of ppv->l_max_ncdm:
if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_off){
/* reject inconsistent values of the number of mutipoles in ultra relativistic neutrino hierarchy */
class_test(ppr->l_max_ncdm < 4,
ppt->error_message,
"ppr->l_max_ncdm=%d should be at least 4, i.e. we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm);
//Copy value from precision parameter:
ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm;
ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm];
}
else{
// In the fluid approximation, hierarchy is cut at lmax = 2 and q dependence is integrated out:
ppv->l_max_ncdm[n_ncdm] = 2;
ppv->q_size_ncdm[n_ncdm] = 1;
}
index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm];
}
}
/* metric (only quantities to be integrated, not those obeying constraint equations) */
/* metric perturbation eta of synchronous gauge */
class_define_index(ppv->index_pt_eta,ppt->gauge == synchronous,index_pt,1);
/* metric perturbation phi of newtonian gauge ( we could fix it
using Einstein equations as a constraint equation for phi, but
integration is numerically more stable if we actually evolve
phi) */
class_define_index(ppv->index_pt_phi,ppt->gauge == newtonian,index_pt,1);
}
if (_vectors_) {
/* Vector baryon velocity: v_b^{(1)}. */
class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1);
/* eventually reject inconsistent values of the number of mutipoles in photon temperature hierarchy and polarization*/
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
ppv->l_max_g = ppr->l_max_g_ten;
class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */
class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */
class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */
class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */
ppv->l_max_pol_g = ppr->l_max_pol_g_ten;
class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */
class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */
class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */
class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */
}
}
/** - (a) metric perturbations V or \f$ h_v \f$ depending on gauge */
if (ppt->gauge == synchronous){
class_define_index(ppv->index_pt_hv_prime,_TRUE_,index_pt,1);
}
if (ppt->gauge == newtonian){
class_define_index(ppv->index_pt_V,_TRUE_,index_pt,1);
}
}
if (_tensors_) {
/* reject inconsistent values of the number of mutipoles in photon temperature hierarchy */
class_test(ppr->l_max_g_ten < 4,
ppt->error_message,
"ppr->l_max_g_ten should be at least 4, i.e. we must integrate at least over photon density, velocity, shear, third momentum");
/* reject inconsistent values of the number of mutipoles in photon polarization hierarchy */
class_test(ppr->l_max_pol_g_ten < 4,
ppt->error_message,
"ppr->l_max_pol_g_ten should be at least 4");
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
ppv->l_max_g = ppr->l_max_g_ten;
class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */
class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */
class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */
class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */
ppv->l_max_pol_g = ppr->l_max_pol_g_ten;
class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */
class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */
class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */
class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */
}
}
/* ultra relativistic neutrinos */
class_define_index(ppv->index_pt_delta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur density */
class_define_index(ppv->index_pt_theta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur velocity */
class_define_index(ppv->index_pt_shear_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur shear */
ppv->l_max_ur = ppr->l_max_ur;
class_define_index(ppv->index_pt_l3_ur,ppt->evolve_tensor_ur,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */
if (ppt->evolve_tensor_ncdm == _TRUE_) {
ppv->index_pt_psi0_ncdm1 = index_pt;
ppv->N_ncdm = pba->N_ncdm;
class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
// Set value of ppv->l_max_ncdm:
class_test(ppr->l_max_ncdm < 4,
ppt->error_message,
"ppr->l_max_ncdm=%d should be at least 4, i.e. we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm);
//Copy value from precision parameter:
ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm;
ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm];
index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm];
}
}
/** - (b) metric perturbation h is a propagating degree of freedom, so h and hdot are included
in the vector of ordinary perturbations, no in that of metric perturbations */
class_define_index(ppv->index_pt_gw,_TRUE_,index_pt,1); /* tensor metric perturbation h (gravitational waves) */
class_define_index(ppv->index_pt_gwdot,_TRUE_,index_pt,1); /* its time-derivative */
}
ppv->pt_size = index_pt;
/** - allocate vectors for storing the values of all these
quantities and their time-derivatives at a given time */
class_calloc(ppv->y,ppv->pt_size,sizeof(double),ppt->error_message);
class_alloc(ppv->dy,ppv->pt_size*sizeof(double),ppt->error_message);
class_alloc(ppv->used_in_sources,ppv->pt_size*sizeof(int),ppt->error_message);
/** - specify which perturbations are needed in the evaluation of source terms */
/* take all of them by default */
for (index_pt=0; index_pt<ppv->pt_size; index_pt++)
ppv->used_in_sources[index_pt] = _TRUE_;
/* indicate which ones are not needed (this is just for saving time,
omitting perturbations in this list will not change the
results!) */
if (_scalars_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/* we don't need temperature multipoles above l=2 (but they are
defined only when rsa and tca are off) */
for (index_pt=ppv->index_pt_l3_g; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
/* for polarization, we only need l=0,2 (but l =1,3, ... are
defined only when rsa and tca are off) */
ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_;
for (index_pt=ppv->index_pt_pol3_g; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
}
}
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
/* we don't need ur multipoles above l=2 (but they are
defined only when rsa and ufa are off) */
for (index_pt=ppv->index_pt_l3_ur; index_pt <= ppv->index_pt_delta_ur+ppv->l_max_ur; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
}
}
}
if (pba->has_ncdm == _TRUE_) {
/* we don't need ncdm multipoles above l=2 (but they are
defined only when ncdmfa is off) */
index_pt = ppv->index_pt_psi0_ncdm1;
for(n_ncdm = 0; n_ncdm < ppv-> N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
if (l>2) ppv->used_in_sources[index_pt]=_FALSE_;
index_pt++;
}
}
}
}
}
if (_tensors_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/* we don't need temperature multipoles above except l=0,2,4 */
ppv->used_in_sources[ppv->index_pt_theta_g]=_FALSE_;
ppv->used_in_sources[ppv->index_pt_l3_g]=_FALSE_;
for (index_pt=ppv->index_pt_delta_g+5; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
/* same for polarization, we only need l=0,2,4 */
ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_;
ppv->used_in_sources[ppv->index_pt_pol3_g]=_FALSE_;
for (index_pt=ppv->index_pt_pol0_g+5; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
}
}
/* we need h' but not h */
ppv->used_in_sources[ppv->index_pt_gw]=_FALSE_;
}
/** - case of setting initial conditions for a new wavenumber */
if (pa_old == NULL) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: initializing vector at tau=%e\n",k,tau);
if (_scalars_) {
/** - --> (a) check that current approximation scheme is consistent
with initial conditions */
class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on,
ppt->error_message,
"scalar initial conditions assume radiation streaming approximation turned off");
if (pba->has_ur == _TRUE_) {
class_test(ppw->approx[ppw->index_ap_ufa] == (int)ufa_on,
ppt->error_message,
"scalar initial conditions assume ur fluid approximation turned off");
}
if (pba->has_ncdm == _TRUE_) {
class_test(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on,
ppt->error_message,
"scalar initial conditions assume ncdm fluid approximation turned off");
}
class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off,
ppt->error_message,
"scalar initial conditions assume tight-coupling approximation turned on");
}
if (_tensors_) {
class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off,
ppt->error_message,
"tensor initial conditions assume tight-coupling approximation turned on");
class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on,
ppt->error_message,
"tensor initial conditions assume radiation streaming approximation turned off");
}
/** - --> (b) let ppw-->pv points towards the perturb_vector structure
that we just created */
ppw->pv = ppv;
/** - --> (c) fill the vector ppw-->pv-->y with appropriate initial conditions */
class_call(perturb_initial_conditions(ppr,
pba,
ppt,
index_md,
index_ic,
k,
tau,
ppw),
ppt->error_message,
ppt->error_message);
}
/** - case of switching approximation while a wavenumber is being integrated */
else {
/** - --> (a) for the scalar mode: */
if (_scalars_) {
/** - ---> (a.1.) check that the change of approximation scheme makes
sense (note: before calling this routine there is already a
check that we wish to change only one approximation flag at
a time) */
class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on),
ppt->error_message,
"at tau=%g: the tight-coupling approximation can be switched off, not on",tau);
/** - ---> (a.2.) some variables (b, cdm, fld, ...) are not affected by
any approximation. They need to be reconducted whatever
the approximation switching is. We treat them here. Below
we will treat other variables case by case. */
ppv->y[ppv->index_pt_delta_b] =
ppw->pv->y[ppw->pv->index_pt_delta_b];
ppv->y[ppv->index_pt_theta_b] =
ppw->pv->y[ppw->pv->index_pt_theta_b];
if (pba->has_cdm == _TRUE_) {
ppv->y[ppv->index_pt_delta_cdm] =
ppw->pv->y[ppw->pv->index_pt_delta_cdm];
if (ppt->gauge == newtonian) {
ppv->y[ppv->index_pt_theta_cdm] =
ppw->pv->y[ppw->pv->index_pt_theta_cdm];
}
}
if (pba->has_dcdm == _TRUE_) {
ppv->y[ppv->index_pt_delta_dcdm] =
ppw->pv->y[ppw->pv->index_pt_delta_dcdm];
ppv->y[ppv->index_pt_theta_dcdm] =
ppw->pv->y[ppw->pv->index_pt_theta_dcdm];
}
if (pba->has_dr == _TRUE_){
for (l=0; l <= ppv->l_max_dr; l++)
ppv->y[ppv->index_pt_F0_dr+l] =
ppw->pv->y[ppw->pv->index_pt_F0_dr+l];
}
if (pba->has_fld == _TRUE_) {
if (pba->use_ppf == _FALSE_) {
ppv->y[ppv->index_pt_delta_fld] =
ppw->pv->y[ppw->pv->index_pt_delta_fld];
ppv->y[ppv->index_pt_theta_fld] =
ppw->pv->y[ppw->pv->index_pt_theta_fld];
}
else {
ppv->y[ppv->index_pt_Gamma_fld] =
ppw->pv->y[ppw->pv->index_pt_Gamma_fld];
}
}
if (pba->has_scf == _TRUE_) {
ppv->y[ppv->index_pt_phi_scf] =
ppw->pv->y[ppw->pv->index_pt_phi_scf];
ppv->y[ppv->index_pt_phi_prime_scf] =
ppw->pv->y[ppw->pv->index_pt_phi_prime_scf];
}
if (ppt->gauge == synchronous)
ppv->y[ppv->index_pt_eta] =
ppw->pv->y[ppw->pv->index_pt_eta];
if (ppt->gauge == newtonian)
ppv->y[ppv->index_pt_phi] =
ppw->pv->y[ppw->pv->index_pt_phi];
/* -- case of switching off tight coupling
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau);
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
/* tight-coupling approximation for shear_g (previously
computed in perturb_derivs: perturb_derivs is always
called at the end of generic_evolver, in order to update
all quantities in ppw to the time at which the
approximation is switched off) */
ppv->y[ppv->index_pt_shear_g] = ppw->tca_shear_g;
ppv->y[ppv->index_pt_l3_g] = 6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->s_l[3]*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for l=3 */
ppv->y[ppv->index_pt_pol0_g] = 2.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=0 */
ppv->y[ppv->index_pt_pol1_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*(5.-2.*ppw->s_l[2])/6.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=1 */
ppv->y[ppv->index_pt_pol2_g] = 0.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=2 */
ppv->y[ppv->index_pt_pol3_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*3.*ppw->s_l[3]/14.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=3 */
if (pba->has_ur == _TRUE_) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
}
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){
// This is correct with or without ncdmfa, since ppv->lmax_ncdm is set accordingly.
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
/* perturbed recombination */
/* the initial conditions are set when tca is switched off (current block) */
if (ppt->has_perturbed_recombination == _TRUE_){
ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] = 1./3.*ppv->y[ppw->pv->index_pt_delta_b];
ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] =0.;
}
} // end of block tca ON -> tca OFF
/* perturbed recombination */
/* For any other transition in the approximation scheme, we should just copy the value of the perturbations, provided tca is already off (otherwise the indices are not yet allocated). For instance, we do not want to copy the values in the (k,tau) region where both UFA and TCA are engaged.*/
if ((ppt->has_perturbed_recombination == _TRUE_)&&(pa_old[ppw->index_ap_tca]==(int)tca_off)){
ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] =
ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_temp];
ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] =
ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_chi];
}
/* -- case of switching on radiation streaming
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]);
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
}
/* -- case of switching on ur fluid
approximation. Provide correct initial conditions to new set
of variables */
if (pba->has_ur == _TRUE_) {
if ((pa_old[ppw->index_ap_ufa] == (int)ufa_off) && (ppw->approx[ppw->index_ap_ufa] == (int)ufa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on ur fluid approximation at tau=%e\n",k,tau);
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
}
if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
ppv->y[ppv->index_pt_shear_g] =
ppw->pv->y[ppw->pv->index_pt_shear_g];
ppv->y[ppv->index_pt_l3_g] =
ppw->pv->y[ppw->pv->index_pt_l3_g];
for (l = 4; l <= ppw->pv->l_max_g; l++) {
ppv->y[ppv->index_pt_delta_g+l] =
ppw->pv->y[ppw->pv->index_pt_delta_g+l];
}
ppv->y[ppv->index_pt_pol0_g] =
ppw->pv->y[ppw->pv->index_pt_pol0_g];
ppv->y[ppv->index_pt_pol1_g] =
ppw->pv->y[ppw->pv->index_pt_pol1_g];
ppv->y[ppv->index_pt_pol2_g] =
ppw->pv->y[ppw->pv->index_pt_pol2_g];
ppv->y[ppv->index_pt_pol3_g] =
ppw->pv->y[ppw->pv->index_pt_pol3_g];
for (l = 4; l <= ppw->pv->l_max_pol_g; l++) {
ppv->y[ppv->index_pt_pol0_g+l] =
ppw->pv->y[ppw->pv->index_pt_pol0_g+l];
}
}
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
}
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
/* This is correct even when ncdmfa == off, since ppv->l_max_ncdm and
ppv->q_size_ncdm is updated.*/
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
}
}
/* -- case of switching on ncdm fluid
approximation. Provide correct initial conditions to new set
of variables */
if (pba->has_ncdm == _TRUE_) {
if ((pa_old[ppw->index_ap_ncdmfa] == (int)ncdmfa_off) && (ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on ncdm fluid approximation at tau=%e\n",k,tau);
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
}
if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
ppv->y[ppv->index_pt_shear_g] =
ppw->pv->y[ppw->pv->index_pt_shear_g];
ppv->y[ppv->index_pt_l3_g] =
ppw->pv->y[ppw->pv->index_pt_l3_g];
for (l = 4; l <= ppw->pv->l_max_g; l++) {
ppv->y[ppv->index_pt_delta_g+l] =
ppw->pv->y[ppw->pv->index_pt_delta_g+l];
}
ppv->y[ppv->index_pt_pol0_g] =
ppw->pv->y[ppw->pv->index_pt_pol0_g];
ppv->y[ppv->index_pt_pol1_g] =
ppw->pv->y[ppw->pv->index_pt_pol1_g];
ppv->y[ppv->index_pt_pol2_g] =
ppw->pv->y[ppw->pv->index_pt_pol2_g];
ppv->y[ppv->index_pt_pol3_g] =
ppw->pv->y[ppw->pv->index_pt_pol3_g];
for (l = 4; l <= ppw->pv->l_max_pol_g; l++) {
ppv->y[ppv->index_pt_pol0_g+l] =
ppw->pv->y[ppw->pv->index_pt_pol0_g+l];
}
}
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
}
}
a = ppw->pvecback[pba->index_bg_a];
index_pt = ppw->pv->index_pt_psi0_ncdm1;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
// We are in the fluid approximation, so ncdm_l_size is always 3.
ncdm_l_size = ppv->l_max_ncdm[n_ncdm]+1;
rho_plus_p_ncdm = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+
ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
for(l=0; l<=2; l++){
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+l] = 0.0;
}
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for(index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++){
// Integrate over distributions:
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] +=
pba->w_ncdm[n_ncdm][index_q]*q2*epsilon*
ppw->pv->y[index_pt];
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] +=
pba->w_ncdm[n_ncdm][index_q]*q2*q*
ppw->pv->y[index_pt+1];
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] +=
pba->w_ncdm[n_ncdm][index_q]*q2*q2/epsilon*
ppw->pv->y[index_pt+2];
//Jump to next momentum bin in ppw->pv->y:
index_pt += (ppw->pv->l_max_ncdm[n_ncdm]+1);
}
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] *=factor/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] *=k*factor/rho_plus_p_ncdm;
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] *=2.0/3.0*factor/rho_plus_p_ncdm;
}
}
}
}
/** - --> (b) for the vector mode */
if (_vectors_) {
/** - ---> (b.1.) check that the change of approximation scheme makes
sense (note: before calling this routine there is already a
check that we wish to change only one approximation flag at
a time) */
class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on),
ppt->error_message,
"at tau=%g: the tight-coupling approximation can be switched off, not on",tau);
    /** - ---> (b.2.) some variables (gw, gwdot, ...) are not affected by
        any approximation. They need to be carried over whatever
        the approximation switching is. We treat them here. Below
        we will treat other variables case by case. */
if (ppt->gauge == synchronous){
ppv->y[ppv->index_pt_hv_prime] =
ppw->pv->y[ppw->pv->index_pt_hv_prime];
}
if (ppt->gauge == newtonian){
ppv->y[ppv->index_pt_V] =
ppw->pv->y[ppw->pv->index_pt_V];
}
ppv->y[ppv->index_pt_theta_b] =
ppw->pv->y[ppw->pv->index_pt_theta_b];
/* -- case of switching off tight coupling
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau);
ppv->y[ppv->index_pt_delta_g] = 0.0; //TBC
//-4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
ppv->y[ppv->index_pt_pol0_g] = 0.0; //TBC
//1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
}
/* -- case of switching on radiation streaming
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]);
}
}
/** - --> (c) for the tensor mode */
if (_tensors_) {
/** - ---> (c.1.) check that the change of approximation scheme makes
sense (note: before calling this routine there is already a
check that we wish to change only one approximation flag at
a time) */
class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on),
ppt->error_message,
"at tau=%g: the tight-coupling approximation can be switched off, not on",tau);
    /** - ---> (c.2.) some variables (gw, gwdot, ...) are not affected by
        any approximation. They need to be carried over whatever
        the approximation switching is. We treat them here. Below
        we will treat other variables case by case. */
ppv->y[ppv->index_pt_gw] =
ppw->pv->y[ppw->pv->index_pt_gw];
ppv->y[ppv->index_pt_gwdot] =
ppw->pv->y[ppw->pv->index_pt_gwdot];
if (ppt->evolve_tensor_ur == _TRUE_){
/* For now, neutrinos go here. */
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
if (ppt->evolve_tensor_ncdm == _TRUE_){
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){
// This is correct with or without ncdmfa, since ppv->lmax_ncdm is set accordingly.
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
/* -- case of switching off tight coupling
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau);
ppv->y[ppv->index_pt_delta_g] = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
ppv->y[ppv->index_pt_pol0_g] = 1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
}
/* -- case of switching on radiation streaming
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]);
}
}
/** - --> (d) free the previous vector of perturbations */
class_call(perturb_vector_free(ppw->pv),
ppt->error_message,
ppt->error_message);
  /** - --> (e) let ppw->pv point to the perturb_vector structure
      that we just created */
ppw->pv = ppv;
}
return _SUCCESS_;
}
/**
 * Free the perturb_vector structure.
 *
 * Releases the optional ncdm bookkeeping arrays, the state vectors y
 * and dy, the source flags, and finally the structure itself. Since
 * free(NULL) is defined to be a no-op by the C standard, no NULL
 * guards are needed for the optional ncdm arrays (they are NULL when
 * no ncdm species was allocated).
 *
 * @param pv Input: pointer to perturb_vector structure to be freed
 * @return the error status
 */

int perturb_vector_free(
                        struct perturb_vector * pv
                        ) {

  free(pv->l_max_ncdm);
  free(pv->q_size_ncdm);
  free(pv->y);
  free(pv->dy);
  free(pv->used_in_sources);
  free(pv);

  return _SUCCESS_;
}
/**
 * For each mode, wavenumber and initial condition, this function
 * initializes all values of the perturbed variables in the vector (in
 * a given gauge). It is assumed that all values have previously been
 * set to zero; only non-zero values are set here.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param index_ic Input: index of initial condition under consideration (ad, iso...)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param ppw Input/Output: workspace containing in input the approximation scheme, the background/thermodynamics/metric quantities, and eventually the previous vector y; and in output the new vector y.
* @return the error status
*/
int perturb_initial_conditions(struct precision * ppr,
struct background * pba,
struct perturbs * ppt,
int index_md,
int index_ic,
double k,
double tau,
struct perturb_workspace * ppw
) {
/** Summary: */
/** --> Declare local variables */
double a,a_prime_over_a;
double w_fld,dw_over_da_fld,integral_fld;
double delta_ur=0.,theta_ur=0.,shear_ur=0.,l3_ur=0.,eta=0.,delta_cdm=0.,alpha, alpha_prime;
double delta_dr=0;
double q,epsilon,k2;
int index_q,n_ncdm,idx;
double rho_r,rho_m,rho_nu,rho_m_over_rho_r;
double fracnu,fracg,fracb,fraccdm,om;
double ktau_two,ktau_three;
double f_dr;
double delta_tot;
double velocity_tot;
double s2_squared;
/** --> For scalars */
if (_scalars_) {
/** - (a) compute relevant background quantities: compute rho_r,
rho_m, rho_nu (= all relativistic except photons), and their
ratio. */
class_call(background_at_tau(pba,
tau,
pba->normal_info,
pba->inter_normal,
&(ppw->last_index_back),
ppw->pvecback),
pba->error_message,
ppt->error_message);
a = ppw->pvecback[pba->index_bg_a];
a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a;
/* 8piG/3 rho_r(t_i) */
rho_r = ppw->pvecback[pba->index_bg_rho_g];
/* 8piG/3 rho_m(t_i) */
rho_m = ppw->pvecback[pba->index_bg_rho_b];
/* 8piG/3 rho_nu(t_i) (all neutrinos and collisionless relics being relativistic at that time) */
rho_nu = 0.;
if (pba->has_cdm == _TRUE_) {
rho_m += ppw->pvecback[pba->index_bg_rho_cdm];
}
if (pba->has_dcdm == _TRUE_) {
rho_m += ppw->pvecback[pba->index_bg_rho_dcdm];
}
if (pba->has_dr == _TRUE_) {
rho_r += ppw->pvecback[pba->index_bg_rho_dr];
rho_nu += ppw->pvecback[pba->index_bg_rho_dr];
}
if (pba->has_ur == _TRUE_) {
rho_r += ppw->pvecback[pba->index_bg_rho_ur];
rho_nu += ppw->pvecback[pba->index_bg_rho_ur];
}
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm=0; n_ncdm<pba->N_ncdm; n_ncdm++){
rho_r += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm];
rho_nu += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm];
}
}
class_test(rho_r == 0.,
ppt->error_message,
"stop to avoid division by zero");
/* f_nu = Omega_nu(t_i) / Omega_r(t_i) */
fracnu = rho_nu/rho_r;
/* f_g = Omega_g(t_i) / Omega_r(t_i) */
fracg = ppw->pvecback[pba->index_bg_rho_g]/rho_r;
/* f_b = Omega_b(t_i) / Omega_m(t_i) */
fracb = ppw->pvecback[pba->index_bg_rho_b]/rho_m;
/* f_cdm = Omega_cdm(t_i) / Omega_m(t_i) */
fraccdm = 1.-fracb;
/* Omega_m(t_i) / Omega_r(t_i) */
rho_m_over_rho_r = rho_m/rho_r;
/* omega = Omega_m(t_i) a(t_i) H(t_i) / sqrt(Omega_r(t_i))
= Omega_m(t_0) a(t_0) H(t_0) / sqrt(Omega_r(t_0)) assuming rho_m in a-3 and rho_r in a^-4
= (8piG/3 rho_m(t_i)) a(t_i) / sqrt(8piG/3 rho_r(t_i)) in Mpc-1
This (a priori strange) parameter is the relevant one for expressing a
as a function of tau during radiation and matter domination (but not DE domination).
Indeed the exact solution of Friedmann when there is only radiation and matter in
the universe is
a = [H(t_0)^2 Omega_m(t_0) a(t_0)^3 / 4] x [tau^2 + 4 tau / omega]
*/
om = a*rho_m/sqrt(rho_r);
/* (k tau)^2, (k tau)^3 */
ktau_two=k*k*tau*tau;
ktau_three=k*tau*ktau_two;
/* curvature-dependent factors */
s2_squared = 1.-3.*pba->K/k/k;
/** - (b) starts by setting everything in synchronous gauge. If
another gauge is needed, we will perform a gauge
transformation below. */
/** - --> (b.1.) adiabatic */
if ((ppt->has_ad == _TRUE_) && (index_ic == ppt->index_ic_ad)) {
/* The following formulas are valid at leading order in
(k*tau) and (om*tau), and order zero in
tight-coupling. Identical to first order terms in CRS,
except for normalization (when ppr->curvature_ini=1, tau=1:
leads to factor 1/2 difference between CRS formulas with
beta1=0). Identical to CAMB when om set to zero in theta_g,
theta_ur, shear_ur, tau
In the non-flat case the relation R=eta is still valid
outside the horizon for adiabatic IC. Hence eta is still
set to ppr->curvature_ini at leading order. Factors s2
appear through the solution of Einstein equations and
equations of motion. */
/* photon density */
ppw->pv->y[ppw->pv->index_pt_delta_g] = - ktau_two/3. * (1.-om*tau/5.)
* ppr->curvature_ini * s2_squared;
/* photon velocity */
ppw->pv->y[ppw->pv->index_pt_theta_g] = - k*ktau_three/36. * (1.-3.*(1.+5.*fracb-fracnu)/20./(1.-fracnu)*om*tau)
* ppr->curvature_ini * s2_squared;
/* tighly-coupled baryons */
ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* baryon density */
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; /* baryon velocity */
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* cdm density */
/* cdm velocity vanishes in the synchronous gauge */
}
if (pba->has_dcdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_dcdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* dcdm density */
/* dcdm velocity velocity vanishes initially in the synchronous gauge */
}
/* fluid (assumes wa=0, if this is not the case the
fluid will catch anyway the attractor solution) */
if (pba->has_fld == _TRUE_) {
class_call(background_w_fld(pba,a,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
if (pba->use_ppf == _FALSE_) {
ppw->pv->y[ppw->pv->index_pt_delta_fld] = - ktau_two/4.*(1.+w_fld)*(4.-3.*pba->cs2_fld)/(4.-6.*w_fld+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC: curvature
ppw->pv->y[ppw->pv->index_pt_theta_fld] = - k*ktau_three/4.*pba->cs2_fld/(4.-6.*w_fld+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC:curvature
}
/* if use_ppf == _TRUE_, y[ppw->pv->index_pt_Gamma_fld] will be automatically set to zero, and this is what we want (although one could probably work out some small nonzero initial conditions: TODO) */
}
if (pba->has_scf == _TRUE_) {
/** - ---> Canonical field (solving for the perturbations):
* initial perturbations set to zero, they should reach the attractor soon enough.
* - ---> TODO: Incorporate the attractor IC from 1004.5509.
* delta_phi \f$ = -(a/k)^2/\phi'(\rho + p)\theta \f$,
* delta_phi_prime \f$ = a^2/\phi' \f$ (delta_rho_phi + V'delta_phi),
* and assume theta, delta_rho as for perfect fluid
* with \f$ c_s^2 = 1 \f$ and w = 1/3 (ASSUMES radiation TRACKING)
*/
ppw->pv->y[ppw->pv->index_pt_phi_scf] = 0.;
/* a*a/k/k/ppw->pvecback[pba->index_bg_phi_prime_scf]*k*ktau_three/4.*1./(4.-6.*(1./3.)+3.*1.) * (ppw->pvecback[pba->index_bg_rho_scf] + ppw->pvecback[pba->index_bg_p_scf])* ppr->curvature_ini * s2_squared; */
ppw->pv->y[ppw->pv->index_pt_phi_prime_scf] = 0.;
/* delta_fld expression * rho_scf with the w = 1/3, c_s = 1
a*a/ppw->pvecback[pba->index_bg_phi_prime_scf]*( - ktau_two/4.*(1.+1./3.)*(4.-3.*1.)/(4.-6.*(1/3.)+3.*1.)*ppw->pvecback[pba->index_bg_rho_scf] - ppw->pvecback[pba->index_bg_dV_scf]*ppw->pv->y[ppw->pv->index_pt_phi_scf])* ppr->curvature_ini * s2_squared; */
}
/* all relativistic relics: ur, early ncdm, dr */
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_)) {
delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g]; /* density of ultra-relativistic neutrinos/relics */
theta_ur = - k*ktau_three/36./(4.*fracnu+15.) * (4.*fracnu+11.+12.*s2_squared-3.*(8.*fracnu*fracnu+50.*fracnu+275.)/20./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini * s2_squared; /* velocity of ultra-relativistic neutrinos/relics */ //TBC
shear_ur = ktau_two/(45.+12.*fracnu) * (3.*s2_squared-1.) * (1.+(4.*fracnu-5.)/4./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini;//TBC /s2_squared; /* shear of ultra-relativistic neutrinos/relics */ //TBC:0
l3_ur = ktau_three*2./7./(12.*fracnu+45.)* ppr->curvature_ini;//TBC
if (pba->has_dr == _TRUE_) delta_dr = delta_ur;
}
/* synchronous metric perturbation eta */
//eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om)) / s2_squared;
//eta = ppr->curvature_ini * s2_squared * (1.-ktau_two/12./(15.+4.*fracnu)*(15.*s2_squared-10.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om));
eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om));
}
    /* isocurvature initial conditions taken from Bucher, Moodley,
       Turok 99, with just a different normalization convention for
       tau and the scale factor. [k tau] from BMT99 is left invariant
       because it is the ratio [k/aH]. But [Omega_i,0 tau] from BMT99
       must be replaced by [frac_i*om*tau/4]. Some doubts remain about
       the niv formulas, which should be rechecked at some point. We
       also checked that for bi, cdi, nid, everything coincides exactly
       with the CAMB formulas. */
/** - --> (b.2.) Cold dark matter Isocurvature */
if ((ppt->has_cdi == _TRUE_) && (index_ic == ppt->index_ic_cdi)) {
class_test(pba->has_cdm == _FALSE_,
ppt->error_message,
"not consistent to ask for CDI in absence of CDM!");
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fraccdm*om*tau*(-2./3.+om*tau/4.);
ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fraccdm*om*ktau_two/12.;
ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) {
delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g];
theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g];
shear_ur = -ppr->entropy_ini*fraccdm*ktau_two*tau*om/6./(2.*fracnu+15.);
}
eta = -ppr->entropy_ini*fraccdm*om*tau*(1./6.-om*tau/16.);
}
/** - --> (b.3.) Baryon Isocurvature */
if ((ppt->has_bi == _TRUE_) && (index_ic == ppt->index_ic_bi)) {
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracb*om*tau*(-2./3.+om*tau/4.);
ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracb*om*ktau_two/12.;
ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
}
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) {
delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g];
theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g];
shear_ur = -ppr->entropy_ini*fracb*ktau_two*tau*om/6./(2.*fracnu+15.);
}
eta = -ppr->entropy_ini*fracb*om*tau*(1./6.-om*tau/16.);
}
/** - --> (b.4.) Neutrino density Isocurvature */
if ((ppt->has_nid == _TRUE_) && (index_ic == ppt->index_ic_nid)) {
class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_),
ppt->error_message,
"not consistent to ask for NID in absence of ur or ncdm species!");
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracnu/fracg*(-1.+ktau_two/6.);
ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracnu/fracg*k*k*tau*(1./4.-fracb/fracg*3./16.*om*tau);
ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini*fracnu/fracg/8.*ktau_two;
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*fracnu*fracb/fracg/80.*ktau_two*om*tau;
}
delta_ur = ppr->entropy_ini*(1.-ktau_two/6.);
theta_ur = ppr->entropy_ini*k*k*tau/4.;
shear_ur = ppr->entropy_ini*ktau_two/(4.*fracnu+15.)/2.;
eta = -ppr->entropy_ini*fracnu/(4.*fracnu+15.)/6.*ktau_two;
}
/** - --> (b.5.) Neutrino velocity Isocurvature */
if ((ppt->has_niv == _TRUE_) && (index_ic == ppt->index_ic_niv)) {
class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_),
ppt->error_message,
"not consistent to ask for NIV in absence of ur or ncdm species!");
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*k*tau*fracnu/fracg*
(1. - 3./16.*fracb*(2.+fracg)/fracg*om*tau); /* small diff wrt camb */
ppw->pv->y[ppw->pv->index_pt_theta_g] = ppr->entropy_ini*fracnu/fracg*3./4.*k*
(-1.+3./4.*fracb/fracg*om*tau+3./16.*om*om*tau*tau*fracb/fracg/fracg*(fracg-3.*fracb)+ktau_two/6.);
ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* small diff wrt camb */
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*9./64.*fracnu*fracb/fracg*k*tau*om*tau;
}
delta_ur = -ppr->entropy_ini*k*tau*(1.+3./16.*fracb*fracnu/fracg*om*tau); /* small diff wrt camb */
theta_ur = ppr->entropy_ini*3./4.*k*(1. - 1./6.*ktau_two*(4.*fracnu+9.)/(4.*fracnu+5.));
shear_ur = ppr->entropy_ini/(4.*fracnu+15.)*k*tau*(1. + 3.*om*tau*fracnu/(4.*fracnu+15.)); /* small diff wrt camb */
eta = ppr->entropy_ini*fracnu*k*tau*(-1./(4.*fracnu+5.) + (-3./64.*fracb/fracg+15./4./(4.*fracnu+15.)/(4.*fracnu+5.)*om*tau)); /* small diff wrt camb */
}
/** - (c) If the needed gauge is really the synchronous gauge, we need to affect the previously computed value of eta to the actual variable eta */
if (ppt->gauge == synchronous) {
ppw->pv->y[ppw->pv->index_pt_eta] = eta;
}
/** - (d) If the needed gauge is the newtonian gauge, we must compute alpha and then perform a gauge transformation for each variable */
if (ppt->gauge == newtonian) {
/* alpha is like in Ma & Bertschinger: (h'+6 eta')/(2k^2). We obtain it from the first two Einstein equations:
alpha = [eta + 3/2 (a'/a)^2 (delta_rho/rho_c) / k^2 /s_2^2 + 3/2 (a'/a)^3 3 ((rho+p)theta/rho_c) / k^4 / s_2^2] / (a'/a)
= [eta + 3/2 (a'/a)^2 / k^2 /s_2^2 {delta_tot + 3 (a'/a) /k^2 velocity_tot}] / (a'/a)
with
delta_tot = (delta_rho/rho_c)
= [rho_r delta_r + rho_m delta_m] / (rho_r + rho_m)
= [delta_r + (rho_m/rho_r) delta_m] / (1 + rho_m/rho_r)
= [(f_g delta_g + f_nu delta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm delta_cdm)] / (1 + rho_m/rho_r)
velocity_tot = ((rho+p)theta/rho_c)
= [(4/3) rho_r theta_r + rho_m theta_m] / (rho_r + rho_m)
= [(4/3) theta_r + (rho_m/rho_r) theta_m] / (1 + rho_m/rho_r)
= [(4/3) (f_g theta_g + f_nu theta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm 0)] / (1 + rho_m/rho_r)
*/
if (pba->has_cdm == _TRUE_)
delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_cdm];
else if (pba->has_dcdm == _TRUE_)
delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_dcdm];
else
delta_cdm=0.;
// note: if there are no neutrinos, fracnu, delta_ur and theta_ur below will consistently be zero.
delta_tot = (fracg*ppw->pv->y[ppw->pv->index_pt_delta_g]+fracnu*delta_ur+rho_m_over_rho_r*(fracb*ppw->pv->y[ppw->pv->index_pt_delta_b]+fraccdm*delta_cdm))/(1.+rho_m_over_rho_r);
velocity_tot = ((4./3.)*(fracg*ppw->pv->y[ppw->pv->index_pt_theta_g]+fracnu*theta_ur) + rho_m_over_rho_r*fracb*ppw->pv->y[ppw->pv->index_pt_theta_b])/(1.+rho_m_over_rho_r);
alpha = (eta + 3./2.*a_prime_over_a*a_prime_over_a/k/k/s2_squared*(delta_tot + 3.*a_prime_over_a/k/k*velocity_tot))/a_prime_over_a;
ppw->pv->y[ppw->pv->index_pt_phi] = eta - a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_delta_g] -= 4.*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_g] += k*k*alpha;
ppw->pv->y[ppw->pv->index_pt_delta_b] -= 3.*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_b] += k*k*alpha;
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] -= 3.*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_cdm] = k*k*alpha;
}
if (pba->has_dcdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_dcdm] += (-3.*a_prime_over_a - a*pba->Gamma_dcdm)*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_dcdm] = k*k*alpha;
}
/* fluid */
if ((pba->has_fld == _TRUE_) && (pba->use_ppf == _FALSE_)) {
class_call(background_w_fld(pba,a,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
ppw->pv->y[ppw->pv->index_pt_delta_fld] += 3*(1.+w_fld)*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_fld] += k*k*alpha;
}
/* scalar field: check */
if (pba->has_scf == _TRUE_) {
alpha_prime = 0.0;
/* - 2. * a_prime_over_a * alpha + eta
- 4.5 * (a2/k2) * ppw->rho_plus_p_shear; */
ppw->pv->y[ppw->pv->index_pt_phi_scf] += alpha*ppw->pvecback[pba->index_bg_phi_prime_scf];
ppw->pv->y[ppw->pv->index_pt_phi_prime_scf] +=
(-2.*a_prime_over_a*alpha*ppw->pvecback[pba->index_bg_phi_prime_scf]
-a*a* dV_scf(pba,ppw->pvecback[pba->index_bg_phi_scf])*alpha
+ppw->pvecback[pba->index_bg_phi_prime_scf]*alpha_prime);
}
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_)) {
delta_ur -= 4.*a_prime_over_a*alpha;
theta_ur += k*k*alpha;
/* shear and l3 are gauge invariant */
if (pba->has_dr == _TRUE_)
delta_dr += (-4.*a_prime_over_a + a*pba->Gamma_dcdm*ppw->pvecback[pba->index_bg_rho_dcdm]/ppw->pvecback[pba->index_bg_rho_dr])*alpha;
}
} /* end of gauge transformation to newtonian gauge */
/** - (e) In any gauge, we should now implement the relativistic initial conditions in ur and ncdm variables */
if (pba->has_ur == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_ur] = delta_ur;
ppw->pv->y[ppw->pv->index_pt_theta_ur] = theta_ur;
ppw->pv->y[ppw->pv->index_pt_shear_ur] = shear_ur;
ppw->pv->y[ppw->pv->index_pt_l3_ur] = l3_ur;
}
if (pba->has_ncdm == _TRUE_) {
idx = ppw->pv->index_pt_psi0_ncdm1;
for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++) {
q = pba->q_ncdm[n_ncdm][index_q];
epsilon = sqrt(q*q+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
ppw->pv->y[idx] = -0.25 * delta_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
ppw->pv->y[idx+1] = -epsilon/3./q/k*theta_ur* pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
ppw->pv->y[idx+2] = -0.5 * shear_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
ppw->pv->y[idx+3] = -0.25 * l3_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
//Jump to next momentum bin:
idx += (ppw->pv->l_max_ncdm[n_ncdm]+1);
}
}
}
if (pba->has_dr == _TRUE_) {
f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*ppw->pvecback[pba->index_bg_rho_dr];
ppw->pv->y[ppw->pv->index_pt_F0_dr] = delta_dr*f_dr;
ppw->pv->y[ppw->pv->index_pt_F0_dr+1] = 4./(3.*k)*theta_ur*f_dr;
ppw->pv->y[ppw->pv->index_pt_F0_dr+2] = 2.*shear_ur*f_dr;
ppw->pv->y[ppw->pv->index_pt_F0_dr+3] = l3_ur*f_dr;
}
}
/** --> For tensors */
if (_tensors_) {
/** tensor initial conditions take into account the fact that
scalar (resp. tensor) \f$ C_l\f$'s are related to the real space
power spectrum of curvature (resp. of the tensor part of
metric perturbations)
\f[ <R(x) R(x)> \ \ \sum_{ij} <h_{ij}(x) h^{ij}(x)> \f]
In momentum space it is conventional to use the modes R(k)
and h(k) where the quantity h obeying to the equation of
propagation:
\f[ h'' + \frac{2a'}{a} h + [k2+2K] h = 12\pi Ga2 (\rho+p) \sigma = 8\pi Ga2 p \pi \f]
and the power spectra in real space and momentum space are related through:
\f[ <R(x) R(x)> = \int \frac{dk}{k} \left[ \frac{k^3}{2\pi^2} <R(k)R(k)^*>\right] = \int \frac{dk}{k} \mathcal{P}_R(k) \f]
\f[\sum_{ij} <h_{ij}(x) h^{ij}(x)> = \frac{dk}{k} \left[ \frac{k^3}{2\pi^2} F\left(\frac{k^2}{K}\right) <h(k)h(k)^*>\right] = \int \frac{dk}{k} F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f]
where \f$ \mathcal{P}_R\f$ and \f$ \mathcal{P}_h\f$ are the dimensionless spectrum of
curvature R, and F is a function of k2/K, where K is the curvature
parameter. F is equal to one in flat space (K=0), and coming
from the contraction of the laplacian eigentensor \f$ Q_{ij}\f$ with
itself. We will give F explicitly below.
Similarly the scalar (S) and tensor (T) \f$ C_l\f$'s are given by
\f[ C_l^S = 4\pi \int \frac{dk}{k} [\Delta_l^S(q)]^2 \mathcal{P}_R(k) \f]
\f[ C_l^T = 4\pi \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f]
The usual convention for the tensor-to-scalar ratio
\f$ r = A_t / A_s \f$ at pivot scale
= 16 epsilon in single-field inflation
is such that for constant \f$ \mathcal{P}_R(k)\f$ and \f$ \mathcal{P}_h(k)\f$,
\f[ r = 6 \frac{\mathcal{P}_h(k)}{\mathcal{P}_R(k)} \f]
so
\f[ \mathcal{P}_h(k) = \frac{\mathcal{P}_R(k) r}{6} = \frac{A_s r}{6} = \frac{A_t}{6} \f]
A priori it would make sense to say that for a power-law
primordial spectrum there is an extra factor \f$ (k/k_{pivot})^{n_t} \f$
(and eventually running and so on and so forth...)
However it has been shown that the minimal models of
inflation in a negatively curved bubble lead to
\f$ \mathcal{P}_h(k)=\tanh(\pi*\nu/2)\f$. In open models it is customary to
define the tensor tilt in a non-flat universe as a deviation
from this behavior rather than from true scale-invariance in
the above sense.
Hence we should have
\f[ \mathcal{P}_h(k) = \frac{A_t}{6} [ \tanh(\pi*\frac{\nu}{2})] (k/k_{pivot})^{(n_t+...)}\f]
where the brackets \f[ [...] \f] mean "if K<0"
Then
\f[ C_l^T = 4\pi \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \frac{A_t}{6} [\tanh(\pi*\frac{\nu}{2})] (k/k_{pivot})^{(n_t+...)} \f]
In the code, it is then a matter of choice to write:
- In the primordial module: \f$ \mathcal{P}_h(k) = \frac{A_t}{6} \tanh{(\pi*\frac{\nu}{2})} (k/k^*)^{n_T}\f$
- In the perturbation initial conditions: \f$ h = 1\f$
- In the spectra module: \f$ C_l^T = \frac{4}{\pi} \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f$
or:
- In the primordial module: \f$ \mathcal{P}_h(k) = A_t (k/k^*)^{n_T} \f$
- In the perturbation initial conditions: \f$ h = \sqrt{[F\left(\frac{k^2}{K}\right) / 6] \tanh{(\pi*\frac{\nu}{2})}} \f$
- In the spectra module: \f$ C_l^T = \frac{4}{\pi} \int \frac{dk}{k} [\Delta_l^T(q)]^2 \mathcal{P}_h(k) \f$
We choose this last option, such that the primordial and
spectra module differ minimally in flat and non-flat space. Then we must impose
\f[ h = \sqrt{\left(\frac{F}{6}\right) \tanh{(\pi*\frac{\nu}{2})}} \f]
The factor F is found to be given by:
\f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dk}{k} \frac{k2(k2-K)}{(k2+3K)(k2+2K)} \mathcal{P}_h(k) \f]
Introducing as usual \f$ q2 = k2 - 3K \f$ and using qdq = kdk this gives
\f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dk}{k} \frac{(q2-3K)(q2-4K)}{q2(q2-K)} \mathcal{P}_h(k) \f]
Using qdq = kdk this is equivalent to
\f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dq}{q} \frac{q2-4K}{q2-K} \mathcal{P}_h(k(q)) \f]
Finally, introducing \f$ \nu=q/\sqrt{|K|}\f$ and sgnK=SIGN(K)\f$=\pm 1\f$, this could also be written
\f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{d\nu}{\nu} \frac{(\nu2-4sgnK)}{(\nu2-sgnK)} \mathcal{P}_h(k(\nu)) \f]
Equation (43,44) of Hu, Seljak, White, Zaldarriaga is
equivalent to absorbing the above factor
\f$ (\nu2-4sgnK)/(\nu2-sgnK)\f$ in the definition of the primordial
spectrum. Since the initial condition should be written in terms of k rather than nu, they should read
\f[ h = \sqrt{ [k2(k2-K)]/[(k2+3K)(k2+2K)] / 6 * \tanh{(\pi*\frac{\nu}{2})} } \f]
We leave the freedom to multiply by an arbitrary number
ppr->gw_ini. The standard convention corresponding to
standard definitions of r, \f$ A_T\f$, \f$ n_T\f$ is however ppr->gw_ini=1.
*
*/
if (index_ic == ppt->index_ic_ten) {
ppw->pv->y[ppw->pv->index_pt_gw] = ppr->gw_ini/_SQRT6_;
}
k2 = k*k;
if (pba->sgnK != 0) {
ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(k2*(k2-pba->K)/(k2+3.*pba->K)/(k2+2.*pba->K));
}
if (pba->sgnK == -1) {
if (k*k+3*pba->K >= 0.) {
ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(tanh(_PI_/2.*sqrt(k2+3*pba->K)/sqrt(-pba->K)));
}
else {
ppw->pv->y[ppw->pv->index_pt_gw] = 0.;
}
}
}
return _SUCCESS_;
}
/**
* Evaluate background/thermodynamics at \f$ \tau \f$, infer useful flags / time scales for integrating perturbations.
*
* Evaluate background quantities at \f$ \tau \f$, as well as thermodynamics for scalar mode; infer useful flags and time scales for integrating the perturbations:
* - check whether tight-coupling approximation is needed.
* - check whether radiation (photons, massless neutrinos...) perturbations are needed.
* - choose step of integration: step = ppr->perturb_integration_stepsize * min_time_scale, where min_time_scale = smallest time scale involved in the equations. There are three time scales to compare:
* -# that of recombination, \f$ \tau_c = 1/\kappa' \f$
* -# Hubble time scale, \f$ \tau_h = a/a' \f$
* -# Fourier mode, \f$ \tau_k = 1/k \f$
*
* So, in general, min_time_scale = \f$ \min(\tau_c, \tau_h, \tau_k) \f$.
*
* However, if \f$ \tau_c \ll \tau_h \f$ and \f$ \tau_c
* \ll \tau_k \f$, we can use the tight-coupling regime for photons
* and write equations in such way that the time scale \f$
* \tau_c \f$ becomes irrelevant (no effective mass term in \f$
* 1/\tau_c \f$). Then, the smallest
* scale in the equations is only \f$ \min(\tau_h, \tau_k) \f$.
* In practice, it is sufficient to use only the condition \f$ \tau_c \ll \tau_h \f$.
*
* Also, if \f$ \rho_{matter} \gg \rho_{radiation} \f$ and \f$ k \gg
* aH \f$, we can switch off radiation perturbations (i.e. switch on
* the free-streaming approximation) and then the smallest scale is
* simply \f$ \tau_h \f$.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param ppw Input/Output: in output contains the approximation to be used at this time
* @return the error status
*/
int perturb_approximations(
                           struct precision * ppr,
                           struct background * pba,
                           struct thermo * pth,
                           struct perturbs * ppt,
                           int index_md,
                           double k,
                           double tau,
                           struct perturb_workspace * ppw
                           ) {

  /** Summary: */

  /* NOTE(review): the only outputs of this routine are the approximation
     flags written to ppw->approx[...]; as side effects it also refreshes
     the cached background/thermodynamics vectors (ppw->pvecback,
     ppw->pvecthermo) and the interpolation indices stored in ppw. */

  /** - define local variables */

  /* (a) time scale of Fourier mode, \f$ \tau_k = 1/k \f$ */
  double tau_k;
  /* (b) time scale of expansion, \f$ \tau_h = a/a' \f$ */
  double tau_h;
  /* (c) time scale of recombination, \f$ \tau_{\gamma} = 1/\kappa' \f$ */
  double tau_c;

  /** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */

  class_test(k == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  tau_k = 1./k;

  /** - evaluate background quantities with background_at_tau() and
      Hubble time scale \f$ \tau_h = a/a' \f$ */

  class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), ppw->pvecback),
             pba->error_message,
             ppt->error_message);

  class_test(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a] == 0.,
             ppt->error_message,
             "aH=0, stop to avoid division by zero");

  tau_h = 1./(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a]);

  /** - for scalar modes: */

  if (_scalars_) {

    /** - --> (a) evaluate thermodynamical quantities with thermodynamics_at_z() */

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./ppw->pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   ppw->inter_mode,
                                   &(ppw->last_index_thermo),
                                   ppw->pvecback,
                                   ppw->pvecthermo),
               pth->error_message,
               ppt->error_message);

    /** - ---> (b.1.) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */

    if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) {
      ppw->approx[ppw->index_ap_tca] = (int)tca_off;
    }

    /** - ---> (b.2.) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */

    else {

      /** - ----> (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

      tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa];

      /* a negative kappa' would be unphysical; usually caused by a badly
         interpolated (undersampled) reionisation history */
      class_test(tau_c < 0.,
                 ppt->error_message,
                 "tau_c = 1/kappa' should always be positive unless there is something wrong in the thermodynamics module. However you have here tau_c=%e at z=%e, conformal time=%e x_e=%e. (This could come from the interpolation of a too poorly sampled reionisation history?).\n",
                 tau_c,
                 1./ppw->pvecback[pba->index_bg_a]-1.,
                 tau,
                 ppw->pvecthermo[pth->index_th_xe]);

      /** - ----> (b.2.b) check whether tight-coupling approximation should be on;
          it requires tau_c to be small compared to BOTH tau_h and tau_k */

      if ((tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) &&
          (tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k)) {
        ppw->approx[ppw->index_ap_tca] = (int)tca_on;
      }
      else {
        ppw->approx[ppw->index_ap_tca] = (int)tca_off;
      }

    }

    /** - --> (c) free-streaming approximations: switched on well inside the
        Hubble radius (tau/tau_k = k*tau large) and after photon decoupling */

    if ((tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) &&
        (tau > pth->tau_free_streaming) &&
        (ppr->radiation_streaming_approximation != rsa_none)) {
      ppw->approx[ppw->index_ap_rsa] = (int)rsa_on;
    }
    else {
      ppw->approx[ppw->index_ap_rsa] = (int)rsa_off;
    }

    /* ultra-relativistic (massless neutrino) fluid approximation:
       triggered purely by k*tau, i.e. by being deep inside the horizon */
    if (pba->has_ur == _TRUE_) {

      if ((tau/tau_k > ppr->ur_fluid_trigger_tau_over_tau_k) &&
          (ppr->ur_fluid_approximation != ufa_none)) {
        ppw->approx[ppw->index_ap_ufa] = (int)ufa_on;
      }
      else {
        ppw->approx[ppw->index_ap_ufa] = (int)ufa_off;
      }
    }

    /* non-cold dark matter fluid approximation: same k*tau criterion */
    if (pba->has_ncdm == _TRUE_) {

      if ((tau/tau_k > ppr->ncdm_fluid_trigger_tau_over_tau_k) &&
          (ppr->ncdm_fluid_approximation != ncdmfa_none)) {
        ppw->approx[ppw->index_ap_ncdmfa] = (int)ncdmfa_on;
      }
      else {
        ppw->approx[ppw->index_ap_ncdmfa] = (int)ncdmfa_off;
      }
    }
  }

  /** - for tensor modes: */

  if (_tensors_) {

    /** - --> (a) evaluate thermodynamical quantities with thermodynamics_at_z() */

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./ppw->pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   ppw->inter_mode,
                                   &(ppw->last_index_thermo),
                                   ppw->pvecback,
                                   ppw->pvecthermo),
               pth->error_message,
               ppt->error_message);

    /** - ---> (b.1.) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */

    if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) {
      ppw->approx[ppw->index_ap_tca] = (int)tca_off;
    }

    /** - ---> (b.2.) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */

    else {

      /** - ----> (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

      tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa];

      /* NOTE(review): unlike the scalar branch above, no sanity test on
         tau_c < 0 is performed here */

      /** - ----> (b.2.b) check whether tight-coupling approximation should be on */

      if ((tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) &&
          (tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k)) {
        ppw->approx[ppw->index_ap_tca] = (int)tca_on;
      }
      else {
        ppw->approx[ppw->index_ap_tca] = (int)tca_off;
      }
    }

    /* radiation streaming approximation: same criteria as for scalars */
    if ((tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) &&
        (tau > pth->tau_free_streaming) &&
        (ppr->radiation_streaming_approximation != rsa_none)) {
      ppw->approx[ppw->index_ap_rsa] = (int)rsa_on;
    }
    else {
      ppw->approx[ppw->index_ap_rsa] = (int)rsa_off;
    }
  }

  return _SUCCESS_;
}
/**
* Compute typical timescale over which the perturbation equations
* vary. Some integrators (e.g. Runge-Kutta) benefit from calling this
* routine at each step in order to adapt the next step.
*
* This is one of the few functions in the code which is passed to the generic_integrator() routine.
* Since generic_integrator() should work with functions passed from various modules, the format of the arguments
* is a bit special:
* - fixed parameters and workspaces are passed through a generic pointer.
* generic_integrator() doesn't know the content of this pointer.
* - the error management is a bit special: errors are not written as usual to pth->error_message, but to a generic
* error_message passed in the list of arguments.
*
* @param tau Input: conformal time
* @param parameters_and_workspace Input: fixed parameters (e.g. indices), workspace, approximation used, etc.
* @param timescale Output: perturbation variation timescale (given the approximation used)
* @param error_message Output: error message
*/
int perturb_timescale(
                      double tau,
                      void * parameters_and_workspace,
                      double * timescale,
                      ErrorMsg error_message
                      ) {

  /** Summary: */

  /** - local variables: the three competing time scales */

  double tau_k;   /* Fourier mode time scale, \f$ \tau_k = 1/k \f$ */
  double tau_h;   /* expansion time scale, \f$ \tau_h = a/a' \f$ */
  double tau_c;   /* recombination time scale, \f$ \tau_{\gamma} = 1/\kappa' \f$ */

  /* flags telling which mode (scalar/vector/tensor) is being integrated */
  int mode_is_scalar;
  int mode_is_vector;
  int mode_is_tensor;

  /** - unpack the generic parameters-and-workspace pointer (this function
      is passed to generic_integrator(), hence the void* argument and the
      error_message passed explicitly rather than through a structure) */

  struct perturb_parameters_and_workspace * pppaw = parameters_and_workspace;
  struct background * pba = pppaw->pba;
  struct thermo * pth = pppaw->pth;
  struct perturbs * ppt = pppaw->ppt;
  struct perturb_workspace * ppw = pppaw->ppw;
  double * pvecback = ppw->pvecback;
  double * pvecthermo = ppw->pvecthermo;

  /** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */

  class_test(pppaw->k == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  tau_k = 1./pppaw->k;

  /** - evaluate background quantities with background_at_tau() and
      Hubble time scale \f$ \tau_h = a/a' \f$ */

  class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), pvecback),
             pba->error_message,
             error_message);

  class_test(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] == 0.,
             error_message,
             "aH=0, stop to avoid division by zero");

  tau_h = 1./(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]);

  /** - identify the mode under consideration */

  mode_is_scalar = (ppt->has_scalars == _TRUE_) && (pppaw->index_md == ppt->index_md_scalars);
  mode_is_vector = (ppt->has_vectors == _TRUE_) && (pppaw->index_md == ppt->index_md_vectors);
  mode_is_tensor = (ppt->has_tensors == _TRUE_) && (pppaw->index_md == ppt->index_md_tensors);

  /** - set the mode-dependent baseline timescale */

  if (mode_is_scalar) {
    /* scalars: start from the Hubble time scale, and bring in tau_k
       unless photons/neutrinos are free-streaming and there is no ncdm */
    *timescale = tau_h;
    if ((ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) || (pba->has_ncdm == _TRUE_))
      *timescale = MIN(tau_k,*timescale);
  }
  else if (mode_is_vector || mode_is_tensor) {
    /* vectors and tensors: always compare expansion and mode time scales */
    *timescale = MIN(tau_h,tau_k);
  }

  /** - outside tight coupling, the recombination time scale
      \f$ \tau_{\gamma} = 1/\kappa' \f$ may be the shortest one: the logic
      is identical for all three modes, so it is factored out here */

  if ((mode_is_scalar || mode_is_vector || mode_is_tensor) &&
      (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   ppw->inter_mode,
                                   &(ppw->last_index_thermo),
                                   pvecback,
                                   pvecthermo),
               pth->error_message,
               error_message);

    if (pvecthermo[pth->index_th_dkappa] != 0.) {
      tau_c = 1./pvecthermo[pth->index_th_dkappa];
      *timescale = MIN(tau_c,*timescale);
    }
  }

  return _SUCCESS_;
}
/**
* Compute metric perturbations (those not integrated over time) using Einstein equations
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param y Input: vector of perturbations (those integrated over time) (already allocated)
* @param ppw Input/Output: in output contains the updated metric perturbations
* @return the error status
*/
int perturb_einstein(
                     struct precision * ppr,
                     struct background * pba,
                     struct thermo * pth,
                     struct perturbs * ppt,
                     int index_md,
                     double k,
                     double tau,
                     double * y,
                     struct perturb_workspace * ppw
                     ) {

  /** Summary: */

  /** - define local variables */

  double k2,a,a2,a_prime_over_a;
  double s2_squared;
  double shear_g = 0.;

  /** - define wavenumber and scale factor related quantities */

  k2 = k*k;
  a = ppw->pvecback[pba->index_bg_a];
  a2 = a * a;
  a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a;
  /* curvature factor (s_2)^2 = 1 - 3K/k^2; equals 1 in a flat universe */
  s2_squared = 1.-3.*pba->K/k2;

  /** - sum up perturbations from all species (fills ppw->delta_rho,
      ppw->rho_plus_p_theta, ppw->rho_plus_p_shear, ppw->delta_p, ...) */
  class_call(perturb_total_stress_energy(ppr,pba,pth,ppt,index_md,k,y,ppw),
             ppt->error_message,
             ppt->error_message);

  /** - for scalar modes: */

  if (_scalars_) {

    /** - --> infer metric perturbations from Einstein equations */

    /* newtonian gauge */
    if (ppt->gauge == newtonian) {

      /* In principle we could get phi from the constraint equation:
         ppw->pvecmetric[ppw->index_mt_phi] = -1.5 * (a2/k2/k2/s2/s2) * (k2 * delta_rho + 3.*a_prime_over_a * rho_plus_p_theta);
         with s2_squared = 1-3K/k2 = ppw->s_l[2]*ppw->s_l[2] (as computed above).
         This was the case in class v1.3. However the integration is
         more stable if we treat phi as a dynamical variable
         y[ppw->pv->index_pt_phi], whose derivative is given by the
         second equation below (credits to Guido Walter Pettinari). */

      /* equation for psi */
      ppw->pvecmetric[ppw->index_mt_psi] = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k2) * ppw->rho_plus_p_shear;

      /* equation for phi' */
      ppw->pvecmetric[ppw->index_mt_phi_prime] = -a_prime_over_a * ppw->pvecmetric[ppw->index_mt_psi] + 1.5 * (a2/k2) * ppw->rho_plus_p_theta;

      /* eventually, infer radiation streaming approximation for
         gamma and ur (this is exactly the right place to do it
         because the result depends on h_prime) */

      if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {

        class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw),
                   ppt->error_message,
                   ppt->error_message);
      }
    }

    /* synchronous gauge */
    if (ppt->gauge == synchronous) {

      /* first equation involving total density fluctuation */
      ppw->pvecmetric[ppw->index_mt_h_prime] =
        ( k2 * s2_squared * y[ppw->pv->index_pt_eta] + 1.5 * a2 * ppw->delta_rho)/(0.5*a_prime_over_a);  /* h' */

      /* eventually, infer radiation streaming approximation for
         gamma and ur (this is exactly the right place to do it
         because the result depends on h_prime) */

      if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {

        class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw),
                   ppt->error_message,
                   ppt->error_message);

        /* update total theta given rsa approximation results */
        ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*ppw->rsa_theta_g;

        if (pba->has_ur == _TRUE_) {
          ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*ppw->rsa_theta_ur;
        }
      }

      /* second equation involving total velocity */
      ppw->pvecmetric[ppw->index_mt_eta_prime] = (1.5 * a2 * ppw->rho_plus_p_theta + 0.5 * pba->K * ppw->pvecmetric[ppw->index_mt_h_prime])/k2/s2_squared;  /* eta' */

      /* third equation involving total pressure */
      ppw->pvecmetric[ppw->index_mt_h_prime_prime] =
        - 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_h_prime]
        + 2. * k2 * s2_squared * y[ppw->pv->index_pt_eta]
        - 9. * a2 * ppw->delta_p;

      /* alpha = (h'+6eta')/2k^2 */
      ppw->pvecmetric[ppw->index_mt_alpha] = (ppw->pvecmetric[ppw->index_mt_h_prime] + 6.*ppw->pvecmetric[ppw->index_mt_eta_prime])/2./k2;

      /* eventually, infer first-order tight-coupling approximation for photon
         shear, then correct the total shear (must happen before the alpha'
         equation below, which reads rho_plus_p_shear) */
      if (ppw->approx[ppw->index_ap_tca] == (int)tca_on) {

        shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_g]+k2*ppw->pvecmetric[ppw->index_mt_alpha]);

        ppw->rho_plus_p_shear += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g;

      }

      /* fourth equation involving total shear */
      ppw->pvecmetric[ppw->index_mt_alpha_prime] =  //TBC
        - 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_alpha]
        + y[ppw->pv->index_pt_eta]
        - 4.5 * (a2/k2) * ppw->rho_plus_p_shear;

    }

    /* transform (delta_m, theta_m) of the current gauge into
       gauge-independent variables (you could comment this out if you
       really want gauge-dependent results) */

    if (ppt->has_source_delta_m == _TRUE_) {

      ppw->delta_m += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_m/k2;
      // note: until 2.4.3 there was a typo, the factor was (-2 H'/H) instead
      // of (3 aH). There is the same typo in the CLASSgal paper
      // 1307.1459v1,v2,v3. It came from a confusion between (1+w_total)
      // and (1+w_matter)=1 [the latter is the relevant one here].
      //
      // note2: at this point this gauge-invariant variable is only
      // valid if all matter components are pressureless and
      // stable. This relation will be generalized soon to the case
      // of decaying dark matter.
    }

    if (ppt->has_source_delta_cb == _TRUE_) {
      /* same gauge transformation as for delta_m, applied to the CDM+baryon
         combination */
      ppw->delta_cb += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_cb/k2;  //check gauge transformation
    }

    if (ppt->has_source_theta_m == _TRUE_) {
      if (ppt->gauge == synchronous) {
        ppw->theta_m += ppw->pvecmetric[ppw->index_mt_alpha]*k2;
      }
    }

    if (ppt->has_source_theta_cb == _TRUE_){
      if (ppt->gauge == synchronous) {
        ppw->theta_cb += ppw->pvecmetric[ppw->index_mt_alpha]*k2;  //check gauge transformation
      }
    }
  }

  /** - for vector modes */

  if (_vectors_) {

    if (ppt->gauge == newtonian) {
      ppw->pvecmetric[ppw->index_mt_V_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_V] - 3.*ppw->vector_source_pi/k;
    }

    if (ppt->gauge == synchronous) {
      // assuming vector_source_pi = p_class a^2 pi_T^{(1)} and vector_source_v = (rho_class+p_class)a^2 v^{(1)}
      // from Hu and White:
      ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi/k2;
      // what we suspect:
      //ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi;
      // if we use the other equation:
      //ppw->pvecmetric[ppw->index_mt_hv_prime] = -2./k/ (1.-2.*pba->K/k2) * 3. * ppw->vector_source_v;
    }
  }

  /** - for tensor modes */

  if (_tensors_) {

    /* single einstein equation for tensor perturbations */
    ppw->pvecmetric[ppw->index_mt_gw_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_gwdot]-(k2+2.*pba->K)*y[ppw->pv->index_pt_gw]+ppw->gw_source;

  }

  return _SUCCESS_;
}
int perturb_total_stress_energy(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
double k,
double * y,
struct perturb_workspace * ppw
) {
/** Summary: */
/** - define local variables */
double a,a2,a_prime_over_a,k2;
double rho_plus_p_tot=0.;
double delta_g=0.;
double theta_g=0.;
double shear_g=0.;
double delta_ur=0.;
double theta_ur=0.;
double shear_ur=0.;
double rho_delta_ncdm=0.;
double rho_plus_p_theta_ncdm=0.;
double rho_plus_p_shear_ncdm=0.;
double delta_p_ncdm=0.;
double factor;
double rho_plus_p_ncdm;
int index_q,n_ncdm,idx;
double epsilon,q,q2,cg2_ncdm,w_ncdm,rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm;
double rho_m,delta_rho_m,rho_plus_p_m,rho_plus_p_theta_m;
double w_fld,dw_over_da_fld,integral_fld;
double gwncdm;
double rho_relativistic;
double rho_dr_over_f;
double delta_rho_scf, delta_p_scf, psi;
double c_gamma_k_H_square;
double Gamma_prime_plus_a_prime_over_a_Gamma, alpha=0., s2sq=1.;
/** - wavenumber and scale factor related quantities */
a = ppw->pvecback[pba->index_bg_a];
a2 = a * a;
a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a;
k2 = k*k;
/** - for scalar modes */
if (_scalars_) {
/** - --> (a) deal with approximation schemes */
/** - ---> (a.1.) photons */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
/** - ----> (a.1.1.) no approximation */
delta_g = y[ppw->pv->index_pt_delta_g];
theta_g = y[ppw->pv->index_pt_theta_g];
shear_g = y[ppw->pv->index_pt_shear_g];
}
else {
/** - ----> (a.1.2.) radiation streaming approximation */
delta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */
theta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */
shear_g = 0.; /* shear always neglected in radiation streaming approximation */
}
}
else {
/** - ----> (a.1.3.) tight coupling approximation */
delta_g = y[ppw->pv->index_pt_delta_g];
theta_g = y[ppw->pv->index_pt_theta_g];
/* first-order tight-coupling approximation for photon shear */
if (ppt->gauge == newtonian) {
shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_g];
}
else {
shear_g = 0.; /* in the synchronous gauge, the expression of
shear_g (at first-order in a tight-coupling
expansion) is a function of h' and eta'; but h'
and eta' are calculated in perturb_einstein()
as a function of delta_g and theta_g. Hence,
we set shear_g temporarily to zero, and set it
to the right first-order value in
perturb_einstein(), just before using the
Einstein equation for the shear. */
}
}
/** - ---> (a.2.) ur */
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
delta_ur = y[ppw->pv->index_pt_delta_ur];
theta_ur = y[ppw->pv->index_pt_theta_ur];
shear_ur = y[ppw->pv->index_pt_shear_ur];
}
else {
delta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */
theta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */
shear_ur = 0.; /* shear always neglected in free streaming approximation */
}
}
/** - --> (b) compute the total density, velocity and shear perturbations */
/* photon and baryon contribution */
ppw->delta_rho = ppw->pvecback[pba->index_bg_rho_g]*delta_g
+ ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b];
ppw->rho_plus_p_theta = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*theta_g
+ ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b];
ppw->rho_plus_p_shear = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g;
ppw->delta_p = 1./3.*ppw->pvecback[pba->index_bg_rho_g]*delta_g
+ ppw->pvecthermo[pth->index_th_cb2]*ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b];
rho_plus_p_tot = 4./3. * ppw->pvecback[pba->index_bg_rho_g] + ppw->pvecback[pba->index_bg_rho_b];
/* cdm contribution */
if (pba->has_cdm == _TRUE_) {
ppw->delta_rho = ppw->delta_rho + ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm];
if (ppt->gauge == newtonian)
ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm];
rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_cdm];
}
/* dcdm contribution */
if (pba->has_dcdm == _TRUE_) {
ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm];
ppw->rho_plus_p_theta += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm];
rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_dcdm];
}
/* ultra-relativistic decay radiation */
if (pba->has_dr == _TRUE_) {
/* We have delta_rho_dr = rho_dr * F0_dr / f, where F follows the
convention in astro-ph/9907388 and f is defined as
f = rho_dr*a^4/rho_crit_today. In CLASS density units
rho_crit_today = H0^2.
*/
rho_dr_over_f = pow(pba->H0/a2,2);
ppw->delta_rho += rho_dr_over_f*y[ppw->pv->index_pt_F0_dr];
ppw->rho_plus_p_theta += 4./3.*3./4*k*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+1];
ppw->rho_plus_p_shear += 2./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+2];
ppw->delta_p += 1./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr];
rho_plus_p_tot += 4./3. * ppw->pvecback[pba->index_bg_rho_dr];
}
/* ultra-relativistic neutrino/relics contribution */
if (pba->has_ur == _TRUE_) {
ppw->delta_rho = ppw->delta_rho + ppw->pvecback[pba->index_bg_rho_ur]*delta_ur;
ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*theta_ur;
ppw->rho_plus_p_shear = ppw->rho_plus_p_shear + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*shear_ur;
ppw->delta_p += 1./3.*ppw->pvecback[pba->index_bg_rho_ur]*delta_ur;
rho_plus_p_tot += 4./3. * ppw->pvecback[pba->index_bg_rho_ur];
}
/* non-cold dark matter contribution */
if (pba->has_ncdm == _TRUE_) {
idx = ppw->pv->index_pt_psi0_ncdm1;
if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on){
// The ncdm perturbations are evolved as momentum-integrated (fluid) quantities:
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_ncdm_bg = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
p_ncdm_bg = ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
pseudo_p_ncdm = ppw->pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm];
rho_plus_p_ncdm = rho_ncdm_bg + p_ncdm_bg;
w_ncdm = p_ncdm_bg/rho_ncdm_bg;
cg2_ncdm = w_ncdm*(1.0-1.0/(3.0+3.0*w_ncdm)*(3.0*w_ncdm-2.0+pseudo_p_ncdm/p_ncdm_bg));
if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {
ppw->delta_ncdm[n_ncdm] = y[idx];
ppw->theta_ncdm[n_ncdm] = y[idx+1];
ppw->shear_ncdm[n_ncdm] = y[idx+2];
}
ppw->delta_rho += rho_ncdm_bg*y[idx];
ppw->rho_plus_p_theta += rho_plus_p_ncdm*y[idx+1];
ppw->rho_plus_p_shear += rho_plus_p_ncdm*y[idx+2];
ppw->delta_p += cg2_ncdm*rho_ncdm_bg*y[idx];
rho_plus_p_tot += rho_plus_p_ncdm;
idx += ppw->pv->l_max_ncdm[n_ncdm]+1;
}
}
else{
// We must integrate to find perturbations:
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_delta_ncdm = 0.0;
rho_plus_p_theta_ncdm = 0.0;
rho_plus_p_shear_ncdm = 0.0;
delta_p_ncdm = 0.0;
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1];
rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2];
delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
//Jump to next momentum bin:
idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
}
rho_delta_ncdm *= factor;
rho_plus_p_theta_ncdm *= k*factor;
rho_plus_p_shear_ncdm *= 2.0/3.0*factor;
delta_p_ncdm *= factor/3.;
if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {
ppw->delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
ppw->theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
ppw->shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
}
ppw->delta_rho += rho_delta_ncdm;
ppw->rho_plus_p_theta += rho_plus_p_theta_ncdm;
ppw->rho_plus_p_shear += rho_plus_p_shear_ncdm;
ppw->delta_p += delta_p_ncdm;
rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
}
}
}
/* scalar field contribution.
In Newtonian gauge, delta_scf depends on the metric perturbation psi which is inferred
from rho_plus_p_shear. So the contribution from the scalar field must be below all
species with non-zero shear.
*/
if (pba->has_scf == _TRUE_) {
if (ppt->gauge == synchronous){
delta_rho_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
delta_p_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
- ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
}
else{
/* equation for psi */
psi = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k/k) * ppw->rho_plus_p_shear;
delta_rho_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
- 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi);
delta_p_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
- ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
- 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi);
}
ppw->delta_rho += delta_rho_scf;
ppw->rho_plus_p_theta += 1./3.*
k*k/a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];
ppw->delta_p += delta_p_scf;
rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_scf]+ppw->pvecback[pba->index_bg_p_scf];
}
/* add your extra species here */
/* fluid contribution */
/************************/
/* For use with CONCEPT */
/************************/
/**
* Count up total pressure and conformal time derivative of pressure,
* excluding the fld species. These are used for the PPF formalism of fld.
*/
double p_tot = 0.;
double p_tot_prime = 0.;
if (pba->has_fld == _TRUE_ && pba->use_ppf == _TRUE_) {
/* Photons */
p_tot += 1./3.*ppw->pvecback[pba->index_bg_rho_g];
p_tot_prime += -3.*a_prime_over_a*(1. + 1./3.)*1./3.
*ppw->pvecback[pba->index_bg_rho_g];
/* Baryons have no pressure */
/* Ultra relativistic species */
if (pba->has_ur == _TRUE_) {
p_tot += 1./3.*ppw->pvecback[pba->index_bg_rho_ur];
p_tot_prime += -3.*a_prime_over_a*(1. + 1./3.)*1./3.
*ppw->pvecback[pba->index_bg_rho_ur];
}
/* Cold dark matter has no pressure */
/* Non-cold dark matter */
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++) {
p_tot += ppw->pvecback[pba->index_bg_p_ncdm1 + n_ncdm];
p_tot_prime += -a_prime_over_a*(5.*ppw->pvecback[pba->index_bg_p_ncdm1 + n_ncdm]
- ppw->pvecback[pba->index_bg_pseudo_p_ncdm1 + n_ncdm]);
}
}
/* Decaying cold dark matter has no pressure */
/* Decay radiation */
if (pba->has_dr == _TRUE_) {
p_tot += 1./3.*ppw->pvecback[pba->index_bg_rho_dr];
p_tot_prime += -3.*a_prime_over_a*(1. + 1./3.)*1./3.
*ppw->pvecback[pba->index_bg_rho_dr]
+ 1./3.*a*pba->Gamma_dcdm*ppw->pvecback[pba->index_bg_rho_dcdm];
}
/* Importantly, we skip the dark energy fluid */
/* Scalar field */
if (pba->has_scf == _TRUE_) {
p_tot += ppw->pvecback[pba->index_bg_p_scf];
p_tot_prime += -a_prime_over_a/(a*a)*ppw->pvecback[pba->index_bg_phi_prime_scf]
*ppw->pvecback[pba->index_bg_phi_prime_scf]
- 2./3.*ppw->pvecback[pba->index_bg_dV_scf]
*ppw->pvecback[pba->index_bg_phi_prime_scf];
}
/* Lambda has constant pressure */
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
if (pba->has_fld == _TRUE_) {
class_call(background_w_fld(pba,a,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
/************************/
/* For use with CONCEPT */
/************************/
double w_prime_fld = dw_over_da_fld*a_prime_over_a*a;
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
if (pba->use_ppf == _FALSE_) {
ppw->delta_rho_fld = ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_delta_fld];
ppw->rho_plus_p_theta_fld = (1.+w_fld)*ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_theta_fld];
/************************/
/* For use with CONCEPT */
/************************/
/* Pressure perturbation of fld without PPF */
double ca2_fld = w_fld - w_prime_fld/(3.*a_prime_over_a*(1. + w_fld));
ppw->delta_p_fld = pba->cs2_fld*ppw->delta_rho_fld
+ (pba->cs2_fld - ca2_fld)*(3.*a_prime_over_a*ppw->rho_plus_p_theta_fld/k2);
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
}
else {
s2sq = ppw->s_l[2]*ppw->s_l[2];
/************************/
/* For use with CONCEPT */
/************************/
/**
* The computation of Gamma_fld and Gamma_prime_fld becomes unstable
* at large c_Gamma*k/H. To stabilise the system we set these to zero
* at some large c_Gamma*k/(aH).
* So as not to introduce discontinuities, we have a smooth transition
* phase between the untouched values and completely nullified values.
* This transition is given the shape of an error function in
* log(c_Gamma*k/(aH)) space. The parameters c_gamma_k_H_square_max_{0|1}
* specify the borders of the transition.
* Here we nullify/shrink Gamma_fld only.
*/
double Gamma_fld, Gamma_weight, Gamma_weight_steepness;
double c_gamma_k_H_square_max_0, c_gamma_k_H_square_max_1;
c_gamma_k_H_square_max_0 = 1e+3;
c_gamma_k_H_square_max_1 = 1e+4;
c_gamma_k_H_square = pow(pba->c_gamma_over_c_fld*k/a_prime_over_a, 2)*pba->cs2_fld;
if (c_gamma_k_H_square > c_gamma_k_H_square_max_1){
Gamma_fld = 0.;
} else {
Gamma_fld = y[ppw->pv->index_pt_Gamma_fld];
if (c_gamma_k_H_square > c_gamma_k_H_square_max_0){
Gamma_weight_steepness = 5.; /* 5 results in double precision perfect transition */
Gamma_weight = 0.5*(erf(Gamma_weight_steepness*(
0.5*(log(c_gamma_k_H_square_max_0) + log(c_gamma_k_H_square_max_1))
- log(c_gamma_k_H_square)
)) + 1.);
Gamma_fld *= Gamma_weight;
}
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
/************************/
/* For use with CONCEPT */
/************************/
double alpha_prime, X, Y, Z, X_prime, Y_prime, Z_prime;
double rho_plus_p_theta_fld_prime, metric_euler;
double rho_t, rho_t_prime, p_t, p_t_prime, rho_fld, rho_fld_prime, p_fld, p_fld_prime;
double H, H_prime;
double theta_t,theta_t_prime, S, S_prime;
if (ppt->gauge == synchronous) {
alpha = (y[ppw->pv->index_pt_eta] + 1.5*a2/k2/s2sq*(ppw->delta_rho
+ 3.*a_prime_over_a/k2*ppw->rho_plus_p_theta)
- Gamma_fld)/a_prime_over_a;
alpha_prime = -2.*a_prime_over_a*alpha + y[ppw->pv->index_pt_eta]
- 4.5*(a2/k2)*ppw->rho_plus_p_shear;
metric_euler = 0.;
} else {
alpha = 0.;
alpha_prime = 0.;
metric_euler = k2*y[ppw->pv->index_pt_phi] - 4.5*a2*ppw->rho_plus_p_shear;
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
ppw->S_fld = ppw->pvecback[pba->index_bg_rho_fld]*(1.+w_fld)*1.5*a2/k2/a_prime_over_a*
(ppw->rho_plus_p_theta/rho_plus_p_tot+k2*alpha);
// note that the last terms in the ratio do not include fld, that's correct, it's the whole point of the PPF scheme
/************************/
/* For use with CONCEPT */
/************************/
/* Nullify/shrink Gamma_prime_fld as done for Gamma_fld above */
if (c_gamma_k_H_square > c_gamma_k_H_square_max_1){
ppw->Gamma_prime_fld = 0.;
} else {
ppw->Gamma_prime_fld = a_prime_over_a*(ppw->S_fld/(1. + c_gamma_k_H_square)
- (1. + c_gamma_k_H_square)*Gamma_fld);
if (c_gamma_k_H_square > c_gamma_k_H_square_max_0){
ppw->Gamma_prime_fld *= Gamma_weight;
}
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
Gamma_prime_plus_a_prime_over_a_Gamma = ppw->Gamma_prime_fld+a_prime_over_a*Gamma_fld;
// delta and theta of fld, valid in both gauges:
ppw->rho_plus_p_theta_fld = ppw->pvecback[pba->index_bg_rho_fld]*(1.+w_fld)*ppw->rho_plus_p_theta/rho_plus_p_tot-
k2*2./3.*a_prime_over_a/a2/(1+4.5*a2/k2/s2sq*rho_plus_p_tot)*
(ppw->S_fld-Gamma_prime_plus_a_prime_over_a_Gamma/a_prime_over_a);
ppw->delta_rho_fld = -2./3.*k2*s2sq/a2*Gamma_fld-3*a_prime_over_a/k2*ppw->rho_plus_p_theta_fld;
/************************/
/* For use with CONCEPT */
/************************/
rho_t = rho_plus_p_tot - p_tot;
p_t = p_tot;
rho_t_prime = -3.*a_prime_over_a*(rho_t + p_t);
p_t_prime = p_tot_prime;
rho_fld = ppw->pvecback[pba->index_bg_rho_fld];
p_fld = w_fld*rho_fld;
rho_fld_prime = -3.*a_prime_over_a*(rho_fld + p_fld);
p_fld_prime = w_prime_fld*rho_fld - 3.*a_prime_over_a*(1. + w_fld)*p_fld;
H = ppw->pvecback[pba->index_bg_H];
H_prime = ppw->pvecback[pba->index_bg_H_prime];
X = c_gamma_k_H_square;
X_prime = -2.*X*(a_prime_over_a + H_prime/H);
Y = 4.5*a2/k2/s2sq*(rho_t + p_t);
Y_prime = Y*(2.*a_prime_over_a + (rho_t_prime + p_t_prime)/(rho_t + p_t));
Z = 2./3.*k2*H/a;
Z_prime = Z*(H_prime/H - a_prime_over_a);
theta_t = ppw->rho_plus_p_theta/rho_plus_p_tot;
theta_t_prime = -a_prime_over_a*theta_t + (-p_t_prime*theta_t + k2*ppw->delta_p
- k2*ppw->rho_plus_p_shear)/rho_plus_p_tot+metric_euler;
S = ppw->S_fld;
S_prime = -Z_prime/Z*S + 1./Z*(rho_fld_prime + p_fld_prime)*(theta_t + k2*alpha)
+ 1./Z*(rho_fld + p_fld)*(theta_t_prime + k2*alpha_prime);
rho_plus_p_theta_fld_prime = Z_prime*(S - 1./(1. + Y)*(S/(1. + 1./X)
+ Gamma_fld*X))
+ Z*(S_prime + Y_prime/(1. + Y*Y + 2.*Y)*(S/(1. + 1./X)
+ Gamma_fld*X)
- 1./(1. + Y)*(S_prime/(1. + 1./X) + S*X_prime/(1. + X*X + 2.*X)
+ ppw->Gamma_prime_fld*X + Gamma_fld*X_prime))
- k2*alpha_prime*(rho_fld + p_fld) - k2*alpha*(rho_fld_prime + p_fld_prime);
ppw->delta_p_fld = (rho_plus_p_theta_fld_prime
+ 4.*a_prime_over_a*ppw->rho_plus_p_theta_fld - (rho_fld + p_fld)*metric_euler)/k2;
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
}
ppw->delta_rho += ppw->delta_rho_fld;
ppw->rho_plus_p_theta += ppw->rho_plus_p_theta_fld;
/************************/
/* For use with CONCEPT */
/************************/
ppw->delta_p += ppw->delta_p_fld;
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
}
/* don't add species here, add them before the fluid contribution: because of the PPF scheme that one must be the last one! */
/* store delta_m in the current gauge. In perturb_einstein, this
will be transformed later on into the gauge-independent variable D
= delta_m - 2H'/H \theta_m/k^2 . */
if (ppt->has_source_delta_m == _TRUE_) {
/* include baryons and cold dark matter */
delta_rho_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b];
rho_m = ppw->pvecback[pba->index_bg_rho_b];
if (pba->has_cdm == _TRUE_) {
delta_rho_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm];
rho_m += ppw->pvecback[pba->index_bg_rho_cdm];
}
/* include decaying cold dark matter */
if (pba->has_dcdm == _TRUE_) {
delta_rho_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm];
rho_m += ppw->pvecback[pba->index_bg_rho_dcdm];
}
/* infer delta_cb */
if (ppt->has_source_delta_cb)
ppw->delta_cb = delta_rho_m/rho_m;
/* include any other species non-relativistic today (like ncdm species) */
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
delta_rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]*ppw->delta_ncdm[n_ncdm];
rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
}
}
/* infer delta_m */
ppw->delta_m = delta_rho_m/rho_m;
}
/* store theta_m in the current gauge. In perturb_einstein, this
will be transformed later on into the gauge-independent variable
Theta . Note that computing theta_m is necessary also if we want
the delta_m source only, because the gauge-invariant delta_m
involves theta_m in the current gauge. */
if ((ppt->has_source_delta_m == _TRUE_) || (ppt->has_source_theta_m == _TRUE_)) {
/* include baryons and cold dark matter */
rho_plus_p_theta_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b];
rho_plus_p_m = ppw->pvecback[pba->index_bg_rho_b];
if (pba->has_cdm == _TRUE_) {
if (ppt->gauge == newtonian)
rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm];
rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_cdm];
}
if (pba->has_dcdm == _TRUE_) {
rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm];
rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_dcdm];
}
if ((ppt->has_source_delta_cb == _TRUE_) || (ppt->has_source_theta_cb == _TRUE_))
ppw->theta_cb = rho_plus_p_theta_m/rho_plus_p_m;
/* include any other species non-relativistic today (like ncdm species) */
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_plus_p_theta_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm])*ppw->theta_ncdm[n_ncdm];
rho_plus_p_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
}
}
/* infer theta_m */
ppw->theta_m = rho_plus_p_theta_m/rho_plus_p_m;
}
}
/** - for vector modes */
if (_vectors_) {
ppw->vector_source_pi = 0.;
ppw->vector_source_v = 0.;
/** - --> photon contribution to vector sources: */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
ppw->vector_source_v += 4./3.*a2*ppw->pvecback[pba->index_bg_rho_g]
* (-1./4.*_SQRT2_)
* (y[ppw->pv->index_pt_delta_g]+2.*y[ppw->pv->index_pt_delta_g]+y[ppw->pv->index_pt_shear_g]);
ppw->vector_source_pi += 1./3.*a2*ppw->pvecback[pba->index_bg_rho_g]
* (6.*_SQRT2_/5./sqrt(1.-2.*pba->K/k/k))
* (4./3./k*y[ppw->pv->index_pt_theta_g]+y[ppw->pv->index_pt_l3_g]);
}
}
/** - --> baryons */
}
/** - for tensor modes */
if (_tensors_) {
ppw->gw_source = 0.0;
/** - --> photon contribution to gravitational wave source: */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
ppw->gw_source += (-_SQRT6_*4*a2*ppw->pvecback[pba->index_bg_rho_g]*
(1./15.*y[ppw->pv->index_pt_delta_g]+
4./21.*y[ppw->pv->index_pt_shear_g]+
1./35.*y[ppw->pv->index_pt_l3_g+1]));
}
}
/** - --> ur contribution to gravitational wave source: */
if (ppt->evolve_tensor_ur == _TRUE_){
rho_relativistic = 0.;
if (ppt->tensor_method == tm_exact)
rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur];
if (ppt->tensor_method == tm_massless_approximation) {
if (pba->has_ur == _TRUE_)
rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur];
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++) {
/* (3 p_ncdm1) is the "relativistic" contribution to rho_ncdm1 */
rho_relativistic += 3.*ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
}
}
}
ppw->gw_source += (-_SQRT6_*4*a2*rho_relativistic*
(1./15.*y[ppw->pv->index_pt_delta_ur]+
4./21.*y[ppw->pv->index_pt_shear_ur]+
1./35.*y[ppw->pv->index_pt_l3_ur+1]));
}
/** - --> ncdm contribution to gravitational wave source: */
if (ppt->evolve_tensor_ncdm == _TRUE_){
idx = ppw->pv->index_pt_psi0_ncdm1;
// We must integrate to find perturbations:
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
gwncdm = 0.;
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
gwncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*(1./15.*y[idx]+2./21.*y[idx+2]+1./35.*y[idx+4]);
//Jump to next momentum bin:
idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
}
gwncdm *= -_SQRT6_*4*a2*factor;
ppw->gw_source += gwncdm;
}
}
}
return _SUCCESS_;
}
/**
* Compute the source functions (three terms for temperature, one for
* E or B modes, etc.)
*
* This is one of the few functions in the code which is passed to
* the generic_integrator() routine. Since generic_integrator()
* should work with functions passed from various modules, the format
* of the arguments is a bit special:
*
* - fixed parameters and workspaces are passed through a generic
* pointer. generic_integrator() doesn't know the content of this
* pointer.
*
* - the error management is a bit special: errors are not written as
* usual to ppt->error_message, but to a generic error_message passed
* in the list of arguments.
*
* @param tau Input: conformal time
* @param y Input: vector of perturbations
* @param dy Input: vector of time derivative of perturbations
* @param index_tau Input: index in the array tau_sampling
* @param parameters_and_workspace Input/Output: in input, all parameters needed by perturb_derivs, in output, source terms
* @param error_message Output: error message
* @return the error status
*/
int perturb_sources(
double tau,
double * y,
double * dy,
int index_tau,
void * parameters_and_workspace,
ErrorMsg error_message
) {
/** Summary: */
/** - define local variables */
double P; /* combination of photon multipoles entering the scattering sources (set below per approximation scheme) */
int index_type;
struct perturb_parameters_and_workspace * pppaw;
struct precision * ppr;
struct background * pba;
struct thermo * pth;
struct perturbs * ppt;
int index_md;
int index_ic;
int index_k;
double k;
double z;
struct perturb_workspace * ppw;
double * pvecback;
double * pvecthermo;
double * pvecmetric;
double delta_g, delta_rho_scf, rho_plus_p_theta_scf;
double a_prime_over_a=0.; /* (a'/a) */
double a_prime_over_a_prime=0.; /* (a'/a)' */
double w_fld,dw_over_da_fld,integral_fld;
int switch_isw = 1; /* include the ISW contribution by default; may be zeroed below depending on eisw/lisw switches */
double a_rel, a2_rel, f_dr; /* a/a_today, its square, and the dr normalization factor */
/** - rename structure fields (just to avoid heavy notations) */
pppaw = parameters_and_workspace;
ppr = pppaw->ppr;
pba = pppaw->pba;
pth = pppaw->pth;
ppt = pppaw->ppt;
index_md = pppaw->index_md;
index_ic = pppaw->index_ic;
index_k = pppaw->index_k;
k = pppaw->k;
ppw = pppaw->ppw;
pvecback = ppw->pvecback;
pvecthermo = ppw->pvecthermo;
pvecmetric = ppw->pvecmetric;
/** - get background/thermo quantities in this point */
class_call(background_at_tau(pba,
tau,
pba->normal_info,
pba->inter_closeby,
&(ppw->last_index_back),
pvecback),
pba->error_message,
error_message);
z = pba->a_today/pvecback[pba->index_bg_a]-1.;
class_call(thermodynamics_at_z(pba,
pth,
z, /* redshift z=1/a-1 */
pth->inter_closeby,
&(ppw->last_index_thermo),
pvecback,
pvecthermo),
pth->error_message,
error_message);
a_rel = ppw->pvecback[pba->index_bg_a]/pba->a_today;
a2_rel = a_rel * a_rel;
/* derived background quantities, useful only in synchronous gauge */
if (ppt->gauge == synchronous) {
a_prime_over_a = pvecback[pba->index_bg_a] * pvecback[pba->index_bg_H]; /* (a'/a)=aH */
a_prime_over_a_prime = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a] + pow(pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a],2); /* (a'/a)' = aH'+(aH)^2 */
}
/** - for scalars */
if (_scalars_) {
/** - --> compute metric perturbations */
class_call(perturb_einstein(ppr,
pba,
pth,
ppt,
index_md,
k,
tau,
y,
ppw),
ppt->error_message,
error_message);
/** - --> compute quantities depending on approximation schemes */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
/* radiation streaming approximation: photon perturbations are reconstructed, polarization source vanishes */
delta_g = ppw->rsa_delta_g;
P = 0.;
}
else {
delta_g = y[ppw->pv->index_pt_delta_g];
if (ppw->approx[ppw->index_ap_tca] == (int)tca_on)
P = 5.* ppw->s_l[2] * ppw->tca_shear_g/8.; /* (2.5+0.5+2)shear_g/8 */
else
P = (y[ppw->pv->index_pt_pol0_g] + y[ppw->pv->index_pt_pol2_g] + 2.* ppw->s_l[2] *y[ppw->pv->index_pt_shear_g])/8.;
}
/** - --> for each type, compute source terms */
/* scalar temperature */
if (ppt->has_source_t == _TRUE_) {
/* check whether integrated Sachs-Wolf term should be included */
if ((ppt->switch_eisw == 0) && (z >= ppt->eisw_lisw_split_z)){
switch_isw = 0;
}
if ((ppt->switch_lisw == 0) && (z < ppt->eisw_lisw_split_z)) {
switch_isw=0;
}
/* newtonian gauge: simplest form, not efficient numerically */
/*
if (ppt->gauge == newtonian) {
_set_source_(ppt->index_tp_t0) = pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_phi_prime] + pvecthermo[pth->index_th_g] * delta_g / 4.;
_set_source_(ppt->index_tp_t1) = pvecthermo[pth->index_th_exp_m_kappa] * k* pvecmetric[ppw->index_mt_psi] + pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b]/k;
_set_source_(ppt->index_tp_t2) = pvecthermo[pth->index_th_g] * P;
}
*/
/* newtonian gauge: slightly more complicated form, but more efficient numerically */
/* the three pieces are: Sachs-Wolfe, ISW (gated by switch_isw), and Doppler */
if (ppt->gauge == newtonian) {
_set_source_(ppt->index_tp_t0) =
ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g / 4. + pvecmetric[ppw->index_mt_psi])
+ switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_phi]-pvecmetric[ppw->index_mt_psi])
+ pvecthermo[pth->index_th_exp_m_kappa] * 2. * pvecmetric[ppw->index_mt_phi_prime])
+ ppt->switch_dop /k/k * (pvecthermo[pth->index_th_g] * dy[ppw->pv->index_pt_theta_b]
+ pvecthermo[pth->index_th_dg] * y[ppw->pv->index_pt_theta_b]);
_set_source_(ppt->index_tp_t1) = switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k* (pvecmetric[ppw->index_mt_psi]-y[ppw->pv->index_pt_phi]);
_set_source_(ppt->index_tp_t2) = ppt->switch_pol * pvecthermo[pth->index_th_g] * P;
}
/* synchronous gauge: simplest form, not efficient numerically */
/*
if (ppt->gauge == synchronous) {
_set_source_(ppt->index_tp_t0) = - pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_h_prime] / 6. + pvecthermo[pth->index_th_g] / 4. * delta_g;
_set_source_(ppt->index_tp_t1) = pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b] / k;
_set_source_(ppt->index_tp_t2) = pvecthermo[pth->index_th_exp_m_kappa] * k*k* 2./3. * ppw->s_l[2] * pvecmetric[ppw->index_mt_alpha] + pvecthermo[pth->index_th_g] * P;
}
*/
/* synchronous gauge: slightly more complicated form, but more efficient numerically */
if (ppt->gauge == synchronous) {
_set_source_(ppt->index_tp_t0) =
ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g/4. + pvecmetric[ppw->index_mt_alpha_prime])
+ switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_eta]
- pvecmetric[ppw->index_mt_alpha_prime]
- 2 * a_prime_over_a * pvecmetric[ppw->index_mt_alpha])
+ pvecthermo[pth->index_th_exp_m_kappa] * 2. * (pvecmetric[ppw->index_mt_eta_prime]
- a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha]
- a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime]))
+ ppt->switch_dop * (pvecthermo[pth->index_th_g] * (dy[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha_prime])
+pvecthermo[pth->index_th_dg] * (y[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha]));
_set_source_(ppt->index_tp_t1) =
switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k * (pvecmetric[ppw->index_mt_alpha_prime]
+ 2. * a_prime_over_a * pvecmetric[ppw->index_mt_alpha]
- y[ppw->pv->index_pt_eta]);
_set_source_(ppt->index_tp_t2) =
ppt->switch_pol * pvecthermo[pth->index_th_g] * P;
}
}
/* scalar polarization */
if (ppt->has_source_p == _TRUE_) {
/* all gauges. Note that the correct formula for the E source
should have a minus sign, as shown in Hu & White. We put a
plus sign to comply with the 'historical convention'
established in CMBFAST and CAMB. */
_set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P;
}
/* now, non-CMB sources */
/* Bardeen potential -PHI_H = phi in Newtonian gauge */
if (ppt->has_source_phi == _TRUE_) {
if (ppt->gauge == newtonian)
_set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_phi];
if (ppt->gauge == synchronous)
_set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_eta] - a_prime_over_a * pvecmetric[ppw->index_mt_alpha];
}
/* its derivative phi' */
if (ppt->has_source_phi_prime == _TRUE_) {
if (ppt->gauge == newtonian)
_set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_phi];
if (ppt->gauge == synchronous)
_set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_eta]
- a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha]
- a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime];
}
/* diff of Bardeen potentials PHI_A-PHI_H = psi + phi in newtonian gauge */
if (ppt->has_source_phi_plus_psi == _TRUE_) {
if (ppt->gauge == newtonian)
_set_source_(ppt->index_tp_phi_plus_psi) =
y[ppw->pv->index_pt_phi] + pvecmetric[ppw->index_mt_psi];
if (ppt->gauge == synchronous)
_set_source_(ppt->index_tp_phi_plus_psi) =
y[ppw->pv->index_pt_eta] + pvecmetric[ppw->index_mt_alpha_prime];
}
/* Bardeen potential PHI_A = psi in newtonian gauge */
if (ppt->has_source_psi == _TRUE_) {
if (ppt->gauge == newtonian)
_set_source_(ppt->index_tp_psi) =
pvecmetric[ppw->index_mt_psi];
if (ppt->gauge == synchronous)
_set_source_(ppt->index_tp_psi) =
a_prime_over_a * pvecmetric[ppw->index_mt_alpha] + pvecmetric[ppw->index_mt_alpha_prime];
}
/* the metric potentials h and eta in synchronous gauge */
if (ppt->gauge == synchronous) {
/* cdm is always on in synchronous gauge, see error message above that checks gauge and has_cdm */
if (ppt->has_source_h == _TRUE_)
_set_source_(ppt->index_tp_h) = - 2 * y[ppw->pv->index_pt_delta_cdm];
if (ppt->has_source_h_prime == _TRUE_)
_set_source_(ppt->index_tp_h_prime) = pvecmetric[ppw->index_mt_h_prime];
if (ppt->has_source_eta == _TRUE_)
_set_source_(ppt->index_tp_eta) = y[ppw->pv->index_pt_eta];
if (ppt->has_source_eta_prime == _TRUE_)
_set_source_(ppt->index_tp_eta_prime) = dy[ppw->pv->index_pt_eta];
}
/* total matter over density (gauge-invariant, defined as in arXiv:1307.1459) */
if (ppt->has_source_delta_m == _TRUE_) {
_set_source_(ppt->index_tp_delta_m) = ppw->delta_m;
}
/* cdm and baryon over density */
if (ppt->has_source_delta_cb == _TRUE_) {
_set_source_(ppt->index_tp_delta_cb) = ppw->delta_cb;
}
/* delta_g */
if (ppt->has_source_delta_g == _TRUE_) {
_set_source_(ppt->index_tp_delta_g) = delta_g;
}
/* delta_baryon */
if (ppt->has_source_delta_b == _TRUE_) {
_set_source_(ppt->index_tp_delta_b) = y[ppw->pv->index_pt_delta_b];
}
/* delta_cdm */
if (ppt->has_source_delta_cdm == _TRUE_) {
_set_source_(ppt->index_tp_delta_cdm) = y[ppw->pv->index_pt_delta_cdm];
}
/* delta_dcdm */
if (ppt->has_source_delta_dcdm == _TRUE_) {
_set_source_(ppt->index_tp_delta_dcdm) = y[ppw->pv->index_pt_delta_dcdm];
}
/* delta_fld */
if (ppt->has_source_delta_fld == _TRUE_) {
_set_source_(ppt->index_tp_delta_fld) = ppw->delta_rho_fld/pvecback[pba->index_bg_rho_fld];
}
/* delta_scf */
if (ppt->has_source_delta_scf == _TRUE_) {
/* scalar-field density perturbation; the Newtonian-gauge branch carries an extra metric (psi) term */
if (ppt->gauge == synchronous){
delta_rho_scf = 1./3.*
(1./a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
}
else{
delta_rho_scf = 1./3.*
(1./a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
- 1./a2_rel*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*ppw->pvecmetric[ppw->index_mt_psi]);
}
_set_source_(ppt->index_tp_delta_scf) = delta_rho_scf/pvecback[pba->index_bg_rho_scf];
}
/* delta_dr */
if (ppt->has_source_delta_dr == _TRUE_) {
f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
_set_source_(ppt->index_tp_delta_dr) = y[ppw->pv->index_pt_F0_dr]/f_dr;
}
/* delta_ur */
if (ppt->has_source_delta_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
_set_source_(ppt->index_tp_delta_ur) = y[ppw->pv->index_pt_delta_ur];
else
_set_source_(ppt->index_tp_delta_ur) = ppw->rsa_delta_ur;
}
/* delta_ncdm1 */
if (ppt->has_source_delta_ncdm == _TRUE_) {
for (index_type = ppt->index_tp_delta_ncdm1; index_type < ppt->index_tp_delta_ncdm1+pba->N_ncdm; index_type++) {
_set_source_(index_type) = ppw->delta_ncdm[index_type - ppt->index_tp_delta_ncdm1];
}
}
/* total velocity (gauge-invariant, defined as in arXiv:1307.1459) */
if (ppt->has_source_theta_m == _TRUE_) {
_set_source_(ppt->index_tp_theta_m) = ppw->theta_m;
}
/* cdm and baryon velocity */
if (ppt->has_source_theta_cb == _TRUE_) {
_set_source_(ppt->index_tp_theta_cb) = ppw->theta_cb;
}
/* theta_g */
if (ppt->has_source_theta_g == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
_set_source_(ppt->index_tp_theta_g) = y[ppw->pv->index_pt_theta_g];
else
_set_source_(ppt->index_tp_theta_g) = ppw->rsa_theta_g;
}
/* theta_baryon */
if (ppt->has_source_theta_b == _TRUE_) {
_set_source_(ppt->index_tp_theta_b) = y[ppw->pv->index_pt_theta_b];
}
/* theta_cdm */
if (ppt->has_source_theta_cdm == _TRUE_) {
_set_source_(ppt->index_tp_theta_cdm) = y[ppw->pv->index_pt_theta_cdm];
}
/* theta_dcdm */
if (ppt->has_source_theta_dcdm == _TRUE_) {
_set_source_(ppt->index_tp_theta_dcdm) = y[ppw->pv->index_pt_theta_dcdm];
}
/* theta_fld */
if (ppt->has_source_theta_fld == _TRUE_) {
/* w_fld is needed to convert (rho+p)*theta into theta */
class_call(background_w_fld(pba,a_rel*pba->a_today,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
_set_source_(ppt->index_tp_theta_fld) = ppw->rho_plus_p_theta_fld/(1.+w_fld)/pvecback[pba->index_bg_rho_fld];
}
/* theta_scf */
if (ppt->has_source_theta_scf == _TRUE_) {
rho_plus_p_theta_scf = 1./3.*
k*k/a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];
_set_source_(ppt->index_tp_theta_scf) = rho_plus_p_theta_scf/
(pvecback[pba->index_bg_rho_scf]+pvecback[pba->index_bg_p_scf]);
}
/* theta_dr */
if (ppt->has_source_theta_dr == _TRUE_) {
f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
_set_source_(ppt->index_tp_theta_dr) = 3./4.*k*y[ppw->pv->index_pt_F0_dr+1]/f_dr;
}
/* theta_ur */
if (ppt->has_source_theta_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
_set_source_(ppt->index_tp_theta_ur) = y[ppw->pv->index_pt_theta_ur];
else
_set_source_(ppt->index_tp_theta_ur) = ppw->rsa_theta_ur;
}
/* theta_ncdm1 */
if (ppt->has_source_theta_ncdm == _TRUE_) {
for (index_type = ppt->index_tp_theta_ncdm1; index_type < ppt->index_tp_theta_ncdm1+pba->N_ncdm; index_type++) {
_set_source_(index_type) = ppw->theta_ncdm[index_type - ppt->index_tp_theta_ncdm1];
}
}
}
/** - for tensors */
if (_tensors_) {
/** - --> compute quantities depending on approximation schemes */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
P = -(1./10.*y[ppw->pv->index_pt_delta_g]
+2./7.*y[ppw->pv->index_pt_shear_g]
+3./70.*y[ppw->pv->index_pt_delta_g+4]
-3./5.*y[ppw->pv->index_pt_pol0_g]
+6./7.*y[ppw->pv->index_pt_pol2_g]
-3./70.*y[ppw->pv->index_pt_pol0_g+4])
/sqrt(6.);
}
else {
P = 2./5.*_SQRT6_*y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC
}
}
else {
P = 0.;
}
/* tensor temperature */
if (ppt->has_source_t == _TRUE_) {
_set_source_(ppt->index_tp_t2) = - y[ppw->pv->index_pt_gwdot] * pvecthermo[pth->index_th_exp_m_kappa] + pvecthermo[pth->index_th_g] * P;
}
/* tensor polarization */
if (ppt->has_source_p == _TRUE_) {
/* Note that the correct formula for the polarization source
should have a minus sign, as shown in Hu & White. We put a
plus sign to comply with the 'historical convention'
established in CMBFAST and CAMB. */
_set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P;
}
}
return _SUCCESS_;
}
/**
* When testing the code or a cosmological model, it can be useful to
* output perturbations at each step of integration (and not just the
* delta's at each source sampling point, which is achieved simply by
* asking for matter transfer functions). Then this function can be
* passed to the generic_evolver routine.
*
* By default, instead of passing this function to generic_evolver,
* one passes a null pointer. Then this function is just not used.
*
* @param tau Input: conformal time
* @param y Input: vector of perturbations
* @param dy Input: vector of its derivatives (already allocated)
* @param parameters_and_workspace Input: fixed parameters (e.g. indices)
* @param error_message Output: error message
*
*/
int perturb_print_variables(double tau,
double * y,
double * dy,
void * parameters_and_workspace,
ErrorMsg error_message
) {
struct perturb_parameters_and_workspace * pppaw;
/** Summary: */
/** - define local variables */
double k;
int index_md;
struct precision * ppr;
struct background * pba;
struct thermo * pth;
struct perturbs * ppt;
struct perturb_workspace * ppw;
double * pvecback;
double * pvecthermo;
double * pvecmetric;
double delta_g,theta_g,shear_g,l4_g,pol0_g,pol1_g,pol2_g,pol4_g;
double delta_b,theta_b;
double delta_cdm=0.,theta_cdm=0.;
double delta_dcdm=0.,theta_dcdm=0.;
double delta_dr=0.,theta_dr=0.,shear_dr=0., f_dr=1.0;
double delta_ur=0.,theta_ur=0.,shear_ur=0.,l4_ur=0.;
double delta_rho_scf=0., rho_plus_p_theta_scf=0.;
double delta_scf=0., theta_scf=0.;
/** - ncdm sector begins */
int n_ncdm;
double *delta_ncdm=NULL, *theta_ncdm=NULL, *shear_ncdm=NULL, *delta_p_over_delta_rho_ncdm=NULL;
double rho_ncdm_bg, p_ncdm_bg, pseudo_p_ncdm, w_ncdm;
double rho_delta_ncdm = 0.0;
double rho_plus_p_theta_ncdm = 0.0;
double rho_plus_p_shear_ncdm = 0.0;
double delta_p_ncdm = 0.0;
double factor = 0.0;
double q,q2,epsilon;
/** - ncdm sector ends */
double phi=0.,psi=0.,alpha=0.;
double phi_prime=0.; //CGT
double delta_temp=0., delta_chi=0.;
double a,a2,H;
int idx,index_q, storeidx;
int index_l;
double *dataptr;
/************************/
/* For use with CONCEPT */
/************************/
/**
* Compute perturbation derivatives. This also ensures that the
* ppw (and other) structs are up-to-date. This is important
* when using the Runge-Kutta evolver, as this is otherwise
* not taken care of correctly.
*/
class_call(
perturb_derivs(tau, y, dy, parameters_and_workspace, error_message),
error_message,
error_message);
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
/** - rename structure fields (just to avoid heavy notations) */
pppaw = parameters_and_workspace;
k = pppaw->k;
index_md = pppaw->index_md;
ppr = pppaw->ppr;
pba = pppaw->pba;
pth = pppaw->pth;
ppt = pppaw->ppt;
ppw = pppaw->ppw;
pvecback = ppw->pvecback;
pvecthermo = ppw->pvecthermo;
pvecmetric = ppw->pvecmetric;
/** - update background/thermo quantities in this point */
class_call(background_at_tau(pba,
tau,
pba->normal_info,
pba->inter_closeby,
&(ppw->last_index_back),
pvecback),
pba->error_message,
error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1.,
pth->inter_closeby,
&(ppw->last_index_thermo),
pvecback,
pvecthermo),
pth->error_message,
error_message);
/** - update metric perturbations in this point */
class_call(perturb_einstein(ppr,
pba,
pth,
ppt,
index_md,
k,
tau,
y,
ppw),
ppt->error_message,
error_message);
a = pvecback[pba->index_bg_a];
/************************/
/* For use with CONCEPT */
/************************/
double dlnf0_dlnq;
/* Only return output at late times */
double a_min = 3e-4;
if (a < a_min)
return _SUCCESS_;
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
a2 = a*a;
H = pvecback[pba->index_bg_H];
if (pba->has_ncdm == _TRUE_){
class_alloc(delta_ncdm, sizeof(double)*pba->N_ncdm,error_message);
class_alloc(theta_ncdm, sizeof(double)*pba->N_ncdm,error_message);
class_alloc(shear_ncdm, sizeof(double)*pba->N_ncdm,error_message);
class_alloc(delta_p_over_delta_rho_ncdm, sizeof(double)*pba->N_ncdm,error_message);
}
/** - calculate perturbed recombination */
if ((ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){
delta_temp = y[ppw->pv->index_pt_perturbed_recombination_delta_temp];
delta_chi =y[ppw->pv->index_pt_perturbed_recombination_delta_chi];
}
/** - for scalar modes */
if (_scalars_) {
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
delta_g = y[ppw->pv->index_pt_delta_g];
theta_g = y[ppw->pv->index_pt_theta_g];
}
else {
delta_g = ppw->rsa_delta_g;
theta_g = ppw->rsa_theta_g;
}
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_on) {
shear_g = ppw->tca_shear_g;
//l3_g = 6./7.*k/pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g;
pol0_g = 2.5*ppw->tca_shear_g;
pol1_g = 7./12.*6./7.*k/pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g;
pol2_g = 0.5*ppw->tca_shear_g;
//pol3_g = 0.25*6./7.*k/pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g;
}
else {
shear_g = y[ppw->pv->index_pt_shear_g];
//l3_g = y[ppw->pv->index_pt_l3_g];
pol0_g = y[ppw->pv->index_pt_pol0_g];
pol1_g = y[ppw->pv->index_pt_pol1_g];
pol2_g = y[ppw->pv->index_pt_pol2_g];
//pol3_g = y[ppw->pv->index_pt_pol3_g];
}
}
else {
shear_g = 0;
//l3_g = 0;
pol0_g = 0;
pol1_g = 0;
pol2_g = 0;
//pol3_g = 0.;
}
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
delta_ur = y[ppw->pv->index_pt_delta_ur];
theta_ur = y[ppw->pv->index_pt_theta_ur];
shear_ur = y[ppw->pv->index_pt_shear_ur];
}
else {
delta_ur = ppw->rsa_delta_ur;
theta_ur = ppw->rsa_theta_ur;
shear_ur = 0.;
}
}
delta_b = y[ppw->pv->index_pt_delta_b];
theta_b = y[ppw->pv->index_pt_theta_b];
if (pba->has_cdm == _TRUE_) {
delta_cdm = y[ppw->pv->index_pt_delta_cdm];
if (ppt->gauge == synchronous) {
theta_cdm = 0.;
}
else {
theta_cdm = y[ppw->pv->index_pt_theta_cdm];
}
}
/* gravitational potentials */
if (ppt->gauge == synchronous) {
alpha = pvecmetric[ppw->index_mt_alpha];
psi = pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] * alpha + pvecmetric[ppw->index_mt_alpha_prime];
phi = y[ppw->pv->index_pt_eta] - pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
phi_prime = 0.0; //CGT
}
else if (ppt->gauge == newtonian){
psi = pvecmetric[ppw->index_mt_psi];
phi = y[ppw->pv->index_pt_phi];
phi_prime = dy[ppw->pv->index_pt_phi]; //CGT
}
else{
psi = 0.0;
phi = 0.0;
phi_prime = 0.0; //CGT
}
if (pba->has_ncdm == _TRUE_) {
/** - --> Get delta, deltaP/rho, theta, shear and store in array */
idx = ppw->pv->index_pt_psi0_ncdm1;
if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on){
// The integrated (fluid) perturbations are evolved directly:
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_ncdm_bg = pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
p_ncdm_bg = pvecback[pba->index_bg_p_ncdm1+n_ncdm];
pseudo_p_ncdm = pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm];
w_ncdm = p_ncdm_bg/rho_ncdm_bg;
delta_ncdm[n_ncdm] = y[idx];
theta_ncdm[n_ncdm] = y[idx+1];
shear_ncdm[n_ncdm] = y[idx+2];
//This is the adiabatic sound speed:
delta_p_over_delta_rho_ncdm[n_ncdm] = w_ncdm*(1.0-1.0/(3.0+3.0*w_ncdm)*(3.0*w_ncdm-2.0+pseudo_p_ncdm/p_ncdm_bg));
idx += ppw->pv->l_max_ncdm[n_ncdm]+1;
}
}
else{
// We must integrate to find perturbations:
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_delta_ncdm = 0.0;
rho_plus_p_theta_ncdm = 0.0;
rho_plus_p_shear_ncdm = 0.0;
delta_p_ncdm = 0.0;
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1];
rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2];
delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
//Jump to next momentum bin:
idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
}
rho_delta_ncdm *= factor;
rho_plus_p_theta_ncdm *= k*factor;
rho_plus_p_shear_ncdm *= 2.0/3.0*factor;
delta_p_ncdm *= factor/3.;
delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
delta_p_over_delta_rho_ncdm[n_ncdm] = delta_p_ncdm/rho_delta_ncdm;
}
}
}
if (pba->has_dcdm == _TRUE_) {
delta_dcdm = y[ppw->pv->index_pt_delta_dcdm];
theta_dcdm = y[ppw->pv->index_pt_theta_dcdm];
}
if (pba->has_dr == _TRUE_) {
f_dr = pow(pvecback[pba->index_bg_a]*pvecback[pba->index_bg_a]/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
delta_dr = y[ppw->pv->index_pt_F0_dr]/f_dr;
theta_dr = y[ppw->pv->index_pt_F0_dr+1]*3./4.*k/f_dr;
shear_dr = y[ppw->pv->index_pt_F0_dr+2]*0.5/f_dr;
}
if (pba->has_scf == _TRUE_){
if (ppt->gauge == synchronous){
delta_rho_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
}
else{
delta_rho_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
- 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*ppw->pvecmetric[ppw->index_mt_psi]);
}
rho_plus_p_theta_scf = 1./3.*
k*k/a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];
delta_scf = delta_rho_scf/pvecback[pba->index_bg_rho_scf];
theta_scf = rho_plus_p_theta_scf/(pvecback[pba->index_bg_rho_scf]+pvecback[pba->index_bg_p_scf]);
}
/* converting synchronous variables to newtonian ones */
/************************/
/* For use with CONCEPT */
/************************/
/* Do not convert to Newtonian gauge */
if (0 == 1) { /* (ppt->gauge == synchronous) { */
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
/* density and velocity perturbations (comment out if you wish to keep synchronous variables) */
delta_g -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
theta_g += k*k*alpha;
delta_b -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
theta_b += k*k*alpha;
if (pba->has_ur == _TRUE_) {
delta_ur -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
theta_ur += k*k*alpha;
}
if (pba->has_dr == _TRUE_) {
delta_dr += (-4.*a*H+a*pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]/pvecback[pba->index_bg_rho_dr])*alpha;
theta_dr += k*k*alpha;
}
if (pba->has_cdm == _TRUE_) {
delta_cdm -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
theta_cdm += k*k*alpha;
}
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
/** - --> Do gauge transformation of delta, deltaP/rho (?) and theta using -= 3aH(1+w_ncdm) alpha for delta. */
}
}
if (pba->has_dcdm == _TRUE_) {
delta_dcdm += alpha*(-a*pba->Gamma_dcdm-3.*a*H);
theta_dcdm += k*k*alpha;
}
if (pba->has_scf == _TRUE_) {
delta_scf += alpha*(-3.0*H*(1.0+pvecback[pba->index_bg_p_scf]/pvecback[pba->index_bg_rho_scf]));
theta_scf += k*k*alpha;
}
}
// fprintf(ppw->perturb_output_file," ");
/** - --> Handle (re-)allocation */
if (ppt->scalar_perturbations_data[ppw->index_ikout] == NULL){
class_alloc(ppt->scalar_perturbations_data[ppw->index_ikout],
sizeof(double)*ppt->number_of_scalar_titles,
error_message);
ppt->size_scalar_perturbation_data[ppw->index_ikout] = 0;
}
else{
ppt->scalar_perturbations_data[ppw->index_ikout] =
realloc(ppt->scalar_perturbations_data[ppw->index_ikout],
sizeof(double)*(ppt->size_scalar_perturbation_data[ppw->index_ikout]+ppt->number_of_scalar_titles));
}
storeidx = 0;
dataptr = ppt->scalar_perturbations_data[ppw->index_ikout]+
ppt->size_scalar_perturbation_data[ppw->index_ikout];
ppt->size_scalar_perturbation_data[ppw->index_ikout] += ppt->number_of_scalar_titles;
class_store_double(dataptr, tau, _TRUE_, storeidx);
class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx);
class_store_double(dataptr, delta_g, _TRUE_, storeidx);
class_store_double(dataptr, theta_g, _TRUE_, storeidx);
class_store_double(dataptr, shear_g, _TRUE_, storeidx);
class_store_double(dataptr, pol0_g, _TRUE_, storeidx);
class_store_double(dataptr, pol1_g, _TRUE_, storeidx);
class_store_double(dataptr, pol2_g, _TRUE_, storeidx);
class_store_double(dataptr, delta_b, _TRUE_, storeidx);
class_store_double(dataptr, theta_b, _TRUE_, storeidx);
class_store_double(dataptr, psi, _TRUE_, storeidx);
class_store_double(dataptr, phi, _TRUE_, storeidx);
class_store_double(dataptr, phi_prime, _TRUE_, storeidx); // CGT
/* perturbed recombination */
class_store_double(dataptr, delta_temp, ppt->has_perturbed_recombination, storeidx);
class_store_double(dataptr, delta_chi, ppt->has_perturbed_recombination, storeidx);
/* Ultra relativistic species */
class_store_double(dataptr, delta_ur, pba->has_ur, storeidx);
class_store_double(dataptr, theta_ur, pba->has_ur, storeidx);
class_store_double(dataptr, shear_ur, pba->has_ur, storeidx);
/* Cold dark matter */
class_store_double(dataptr, delta_cdm, pba->has_cdm, storeidx);
class_store_double(dataptr, theta_cdm, pba->has_cdm, storeidx);
/* Non-cold Dark Matter */
if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) {
idx = ppw->pv->index_pt_psi0_ncdm1;
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
class_store_double(dataptr, delta_ncdm[n_ncdm], _TRUE_, storeidx);
class_store_double(dataptr, theta_ncdm[n_ncdm], _TRUE_, storeidx);
class_store_double(dataptr, shear_ncdm[n_ncdm], _TRUE_, storeidx);
class_store_double(dataptr, delta_p_over_delta_rho_ncdm[n_ncdm], _TRUE_, storeidx);
/************************/
/* For use with CONCEPT */
/************************/
/* Include ncdm Theta_n_q_l_ncdm[n,q,l] in perturbation output */
class_store_double(dataptr, pba->M_ncdm[n_ncdm], _TRUE_, storeidx);
if (ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on) {
for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
class_store_double(dataptr, 0.0, _TRUE_, storeidx);
}
for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
class_store_double(dataptr, 0.0, _TRUE_, storeidx);
}
for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
for (index_l=0; index_l<=ppw->pv->l_max_ncdm[n_ncdm]; index_l++) {
class_store_double(dataptr, 0.0, _TRUE_, storeidx);
}
}
}
else {
for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
class_store_double(dataptr, pba->dlnf0_dlnq_ncdm[n_ncdm][index_q], _TRUE_, storeidx);
}
for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
class_store_double(dataptr, pba->q_ncdm[n_ncdm][index_q], _TRUE_, storeidx);
}
for (index_q=0; index_q<pba->q_size_ncdm[n_ncdm]; index_q++) {
dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
for (index_l=0; index_l<=ppw->pv->l_max_ncdm[n_ncdm]; index_l++) {
class_store_double(dataptr, -y[idx]/dlnf0_dlnq, _TRUE_, storeidx);
idx++;
/* class_store_double(dataptr, y[idx], _TRUE_, storeidx); */
/* Jump to next momentum bin */
/* idx += (ppw->pv->l_max_ncdm[n_ncdm]+1); */
}
}
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
}
}
/* Decaying cold dark matter */
class_store_double(dataptr, delta_dcdm, pba->has_dcdm, storeidx);
class_store_double(dataptr, theta_dcdm, pba->has_dcdm, storeidx);
/* Decay radiation */
class_store_double(dataptr, delta_dr, pba->has_dr, storeidx);
class_store_double(dataptr, theta_dr, pba->has_dr, storeidx);
class_store_double(dataptr, shear_dr, pba->has_dr, storeidx);
/* Scalar field scf*/
class_store_double(dataptr, delta_scf, pba->has_scf, storeidx);
class_store_double(dataptr, theta_scf, pba->has_scf, storeidx);
//fprintf(ppw->perturb_output_file,"\n");
/************************/
/* For use with CONCEPT */
/************************/
/* Include fld in perturbation output */
double w_fld, dw_over_da_fld, integral_fld, theta_fld;
if (pba->has_fld) {
class_call(background_w_fld(pba, a, &w_fld, &dw_over_da_fld, &integral_fld),
pba->error_message, ppt->error_message);
class_store_double(dataptr, ppw->delta_rho_fld/pvecback[pba->index_bg_rho_fld],
pba->has_fld, storeidx);
/* For w_fld = -1 (Lambda), we have theta = 0 */
if (w_fld == -1.) {
theta_fld = 0.;
}
else {
theta_fld = ppw->rho_plus_p_theta_fld/
((1. + w_fld)*pvecback[pba->index_bg_rho_fld]);
}
class_store_double(dataptr, theta_fld, pba->has_fld, storeidx);
/**
* We choose to store cs2_fld = delta_p_fld/delta_rho_fld rather than
* simply delta_p_fld itself, as is done for massive neutrinos.
*
*/
class_store_double(dataptr,
ppw->delta_p_fld/ppw->delta_rho_fld, pba->has_fld, storeidx);
}
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
/************************/
/* For use with CONCEPT */
/************************/
/* Include theta_tot in perturbation output */
double rho_plus_p_tot = -2./3.*pvecback[pba->index_bg_H_prime]/a + 2./3.*pba->K/(a*a);
double theta_tot = ppw->rho_plus_p_theta/rho_plus_p_tot;
class_store_double(dataptr, theta_tot, _TRUE_, storeidx);
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
/************************/
/* For use with CONCEPT */
/************************/
/* Include h_prime in perturbation output */
class_store_double(dataptr, pvecmetric[ppw->index_mt_h_prime],
ppt->gauge == synchronous, storeidx);
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
/************************/
/* For use with CONCEPT */
/************************/
/**
* Include H_T_prime (in N-body gauge) in perturbation output.
* Here we make use of rho_plus_p_tot defined earlier.
*/
double p_tot_prime = 0.0;
/* Photons */
p_tot_prime += -3.*a*H*(1. + 1./3.)*1./3.*pvecback[pba->index_bg_rho_g];
/* Baryons have no pressure */
/* Ultra relativistic species */
if (pba->has_ur == _TRUE_)
p_tot_prime += -3.*a*H*(1. + 1./3.)*1./3.*pvecback[pba->index_bg_rho_ur];
/* Cold dark matter has no pressure */
/* Non-cold dark matter */
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++)
p_tot_prime += -a*H*(5.*pvecback[pba->index_bg_p_ncdm1+n_ncdm]
- pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm]);
}
/* Decaying cold dark matter has no pressure */
/* Decay radiation */
if (pba->has_dr == _TRUE_)
p_tot_prime += -3.*a*H*(1. + 1./3.)*1./3.*pvecback[pba->index_bg_rho_dr]
+ 1./3.*a*pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm];
/* Dark energy fluid */
if (pba->has_fld == _TRUE_) {
p_tot_prime += a*H*pvecback[pba->index_bg_rho_fld]
*(a*dw_over_da_fld - 3.*w_fld*(1. + w_fld));
}
/* Scalar field */
if (pba->has_scf == _TRUE_) {
p_tot_prime += -H/a*pvecback[pba->index_bg_phi_prime_scf]
*pvecback[pba->index_bg_phi_prime_scf]
- 2./3.*pvecback[pba->index_bg_dV_scf]*pvecback[pba->index_bg_phi_prime_scf];
}
/* Lambda has constant pressure */
double H_T_prime = 3.*a*H/rho_plus_p_tot*(
- ppw->delta_p
+ p_tot_prime*theta_tot/(k*k)
+ ppw->rho_plus_p_shear);
class_store_double(dataptr, H_T_prime, _TRUE_, storeidx);
/**************************/
/* ^For use with CONCEPT^ */
/**************************/
}
/** - for tensor modes: */
if (_tensors_) {
if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) {
delta_g = y[ppw->pv->index_pt_delta_g];
shear_g = y[ppw->pv->index_pt_shear_g];
l4_g = y[ppw->pv->index_pt_delta_g+4];
pol0_g = y[ppw->pv->index_pt_pol0_g];
pol2_g = y[ppw->pv->index_pt_pol2_g];
pol4_g = y[ppw->pv->index_pt_pol0_g+4];
}
else {
delta_g = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/pvecthermo[pth->index_th_dkappa]; //TBC
shear_g = 0.;
l4_g = 0.;
pol0_g = 1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/pvecthermo[pth->index_th_dkappa]; //TBC
pol2_g = 0.;
pol4_g = 0.;
}
}
else {
delta_g = 0.;
shear_g = 0.;
l4_g = 0.;
pol0_g = 0.;
pol2_g = 0.;
pol4_g = 0.;
}
if (ppt->evolve_tensor_ur == _TRUE_){
delta_ur = y[ppw->pv->index_pt_delta_ur];
shear_ur = y[ppw->pv->index_pt_shear_ur];
l4_ur = y[ppw->pv->index_pt_delta_ur+4];
}
/** - --> Handle (re-)allocation */
if (ppt->tensor_perturbations_data[ppw->index_ikout] == NULL){
class_alloc(ppt->tensor_perturbations_data[ppw->index_ikout],
sizeof(double)*ppt->number_of_tensor_titles,
error_message);
ppt->size_tensor_perturbation_data[ppw->index_ikout] = 0;
}
else{
ppt->tensor_perturbations_data[ppw->index_ikout] =
realloc(ppt->tensor_perturbations_data[ppw->index_ikout],
sizeof(double)*(ppt->size_tensor_perturbation_data[ppw->index_ikout]+ppt->number_of_tensor_titles));
}
storeidx = 0;
dataptr = ppt->tensor_perturbations_data[ppw->index_ikout]+
ppt->size_tensor_perturbation_data[ppw->index_ikout];
ppt->size_tensor_perturbation_data[ppw->index_ikout] += ppt->number_of_tensor_titles;
//fprintf(ppw->perturb_output_file," ");
class_store_double(dataptr, tau, _TRUE_, storeidx);
class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx);
class_store_double(dataptr, delta_g, _TRUE_, storeidx);
class_store_double(dataptr, shear_g, _TRUE_, storeidx);
class_store_double(dataptr, l4_g, _TRUE_, storeidx);
class_store_double(dataptr, pol0_g, _TRUE_, storeidx);
class_store_double(dataptr, pol2_g, _TRUE_, storeidx);
class_store_double(dataptr, pol4_g, _TRUE_, storeidx);
class_store_double(dataptr, y[ppw->pv->index_pt_gw], _TRUE_, storeidx);
class_store_double(dataptr, y[ppw->pv->index_pt_gwdot], _TRUE_, storeidx);
class_store_double(dataptr, delta_ur, ppt->evolve_tensor_ur, storeidx);
class_store_double(dataptr, shear_ur, ppt->evolve_tensor_ur, storeidx);
class_store_double(dataptr, l4_ur, ppt->evolve_tensor_ur, storeidx);
//printf("index_pt_delta+ur = %d\n",ppw->pv->index_pt_delta_ur);
/* Non-cold Dark Matter */
if (ppt->evolve_tensor_ncdm == _TRUE_) {
idx = ppw->pv->index_pt_psi0_ncdm1;
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_delta_ncdm = 0.0;
rho_plus_p_theta_ncdm = 0.0;
rho_plus_p_shear_ncdm = 0.0;
delta_p_ncdm = 0.0;
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1];
rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2];
delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
//Jump to next momentum bin:
idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
}
rho_delta_ncdm *= factor;
rho_plus_p_theta_ncdm *= k*factor;
rho_plus_p_shear_ncdm *= 2.0/3.0*factor;
delta_p_ncdm *= factor/3.;
delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
class_store_double(dataptr, delta_ncdm[n_ncdm], _TRUE_, storeidx);
class_store_double(dataptr, theta_ncdm[n_ncdm], _TRUE_, storeidx);
class_store_double(dataptr, shear_ncdm[n_ncdm], _TRUE_, storeidx);
}
}
// fprintf(ppw->perturb_output_file,"\n");
}
if (pba->has_ncdm == _TRUE_){
free(delta_ncdm);
free(theta_ncdm);
free(shear_ncdm);
free(delta_p_over_delta_rho_ncdm);
}
return _SUCCESS_;
}
/**
* Compute derivative of all perturbations to be integrated
*
* For each mode (scalar/vector/tensor) and each wavenumber k, this
* function computes the derivative of all values in the vector of
* perturbed variables to be integrated.
*
* This is one of the few functions in the code that are passed to the generic_integrator() routine.
* Since generic_integrator() should work with functions passed from various modules, the format of the arguments
* is a bit special:
* - fixed parameters and workspaces are passed through a generic pointer.
* generic_integrator() doesn't know what the content of this pointer is.
* - errors are not written as usual in pth->error_message, but in a generic
* error_message passed in the list of arguments.
*
* @param tau Input: conformal time
* @param y Input: vector of perturbations
* @param dy Output: vector of its derivatives (already allocated)
* @param parameters_and_workspace Input/Output: in input, fixed parameters (e.g. indices); in output, background and thermo quantities evaluated at tau.
* @param error_message Output: error message
*/
int perturb_derivs(double tau,
double * y,
double * dy,
void * parameters_and_workspace,
ErrorMsg error_message
) {
/** Summary: */
/** - define local variables */
/* multipole */
int l;
/* scale factor and other background quantities */
double a,a2,a_prime_over_a,R;
/* short-cut names for the fields of the input structure */
struct perturb_parameters_and_workspace * pppaw;
double k,k2;
int index_md;
struct precision * ppr;
struct background * pba;
struct thermo * pth;
struct perturbs * ppt;
struct perturb_workspace * ppw;
double * pvecback;
double * pvecthermo;
double * pvecmetric;
double * s_l;
struct perturb_vector * pv;
/* short-cut notations for the perturbations */
double delta_g=0.,theta_g=0.,shear_g=0.;
double delta_b,theta_b;
double cb2,cs2,ca2;
double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_ufa_class=0.;
/* perturbed recombination (just to simplify the notation) */
double H0=0.,Nnow=0.,n_H=0.,fHe=0.;
double delta_temp=0.,delta_chi=0., chi=0.;
double alpha_rec=0.,delta_alpha_rec=0.;
double a_rad=0., Compton_CR =0.;
double Tb_in_K=0.;
/* Non-metric source terms for photons, i.e. \mathcal{P}^{(m)} from arXiv:1305.3261 */
double P0,P1,P2;
/* for use with fluid (fld): */
double w_fld,dw_over_da_fld,w_prime_fld,integral_fld;
/* for use with non-cold dark matter (ncdm): */
int index_q,n_ncdm,idx;
double q,epsilon,dlnf0_dlnq,qk_div_epsilon;
double rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm,w_ncdm,ca2_ncdm,ceff2_ncdm=0.,cvis2_ncdm=0.;
/* for use with curvature */
double cotKgen, sqrt_absK;
double s2_squared, ssqrt3;
/* for use with dcdm and dr */
double f_dr, fprime_dr;
/** - rename the fields of the input structure (just to avoid heavy notations) */
pppaw = parameters_and_workspace;
k = pppaw->k;
k2=k*k;
index_md = pppaw->index_md;
ppr = pppaw->ppr;
pba = pppaw->pba;
pth = pppaw->pth;
ppt = pppaw->ppt;
ppw = pppaw->ppw;
s_l = ppw->s_l;
pvecback = ppw->pvecback;
pvecthermo = ppw->pvecthermo;
pvecmetric = ppw->pvecmetric;
pv = ppw->pv;
/** - get background/thermo quantities in this point */
class_call(background_at_tau(pba,
tau,
pba->normal_info,
pba->inter_closeby,
&(ppw->last_index_back),
pvecback),
pba->error_message,
error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_closeby,
&(ppw->last_index_thermo),
pvecback,
pvecthermo),
pth->error_message,
error_message);
/** - get metric perturbations with perturb_einstein() */
class_call(perturb_einstein(ppr,
pba,
pth,
ppt,
index_md,
k,
tau,
y,
ppw),
ppt->error_message,
error_message);
/** - compute related background quantities */
a = pvecback[pba->index_bg_a];
a2 = a*a;
a_prime_over_a = pvecback[pba->index_bg_H] * a;
R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b];
/** - Compute 'generalised cotK function of argument \f$ \sqrt{|K|}*\tau \f$, for closing hierarchy.
(see equation 2.34 in arXiv:1305.3261): */
if (pba->has_curvature == _FALSE_){
cotKgen = 1.0/(k*tau);
}
else{
sqrt_absK = sqrt(fabs(pba->K));
if (pba->K < 0)
cotKgen = sqrt_absK/k/tanh(sqrt_absK*tau);
else
cotKgen = sqrt_absK/k/tan(sqrt_absK*tau);
}
s2_squared = 1.-3.*pba->K/k2;
/** - for scalar modes: */
if (_scalars_) {
/** - --> (a) define short-cut notations for the scalar perturbations */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
}
delta_b = y[pv->index_pt_delta_b];
theta_b = y[pv->index_pt_theta_b];
cb2 = pvecthermo[pth->index_th_cb2];
/** - --> (b) perturbed recombination **/
if ((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca]==(int)tca_off)){
delta_temp= y[ppw->pv->index_pt_perturbed_recombination_delta_temp];
delta_chi= y[ppw->pv->index_pt_perturbed_recombination_delta_chi];
chi=pvecthermo[pth->index_th_xe];
// Conversion of H0 to inverse seconds (pba->H0 is [H0/c] in inverse Mpc)
H0 = pba->H0 * _c_ / _Mpc_over_m_;
//Computation of Nnow in SI units
Nnow = 3.*H0*H0*pba->Omega0_b*(1.-pth->YHe)/(8.*_PI_*_G_*_m_H_);
// total amount of hydrogen today
n_H = (pba->a_today/a)*(pba->a_today/a)*(pba->a_today/a)* Nnow;
// Helium-to-hydrogen ratio
fHe = pth->YHe / (_not4_*(1-pth->YHe));
// The constant such that rho_gamma = a_rad * T^4
a_rad = 8./15.*pow(_PI_,5)*pow(_k_B_,4)/pow(_c_*_h_P_,3);
// Compton cooling rate in Mpc^(-1)
Compton_CR = 8./3. *_sigma_ * a_rad /(_m_e_ * _c_ *_c_) *_Mpc_over_m_ ;
// Temperature is already in Kelvin
Tb_in_K = pvecthermo[pth->index_th_Tb];
// Alpha in m^3/s, cf. Recfast paper
alpha_rec = 1.14 * 4.309e-19*pow((Tb_in_K * 1e-4),-0.6166)/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) ;
// delta alpha, dimensionless
delta_alpha_rec= (-0.6166 + 0.6703 * pow((Tb_in_K * 1e-4),0.53)*(-0.6166-0.53))/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) * delta_temp;
} // end of perturbed recombination related quantities
/** - --> (c) compute metric-related quantities (depending on gauge; additional gauges can be coded below)
- Each continuity equation contains a term in (theta+metric_continuity) with
metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge
- Each Euler equation contains a source term metric_euler with
metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge
- Each shear derivative equation contains a source term metric_shear equal to
metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge
- metric_shear_prime is the derivative of metric_shear
- In the ufa_class approximation, the leading-order source term is (h_prime/2) in synchronous gauge,
(-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate the latter by (-6 phi_prime) */
if (ppt->gauge == synchronous) {
metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.;
metric_euler = 0.;
metric_shear = k2 * pvecmetric[ppw->index_mt_alpha];
//metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime];
metric_ufa_class = pvecmetric[ppw->index_mt_h_prime]/2.;
}
if (ppt->gauge == newtonian) {
metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime];
metric_euler = k2*pvecmetric[ppw->index_mt_psi];
metric_shear = 0.;
//metric_shear_prime = 0.;
metric_ufa_class = -6.*pvecmetric[ppw->index_mt_phi_prime];
}
/** - --> (d) if some approximation schemes are turned on, enforce a few y[] values computed in perturb_einstein */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
delta_g = ppw->rsa_delta_g;
theta_g = ppw->rsa_theta_g;
}
/** - --> (e) BEGINNING OF ACTUAL SYSTEM OF EQUATIONS OF EVOLUTION */
/* Note concerning perturbed recombination: $cb2*delta_b$ must be replaced everywhere by $cb2*(delta_b+delta_temp)$. If perturbed recombination is not required, delta_temp is equal to zero. */
/** - ---> photon temperature density */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
dy[pv->index_pt_delta_g] = -4./3.*(theta_g+metric_continuity);
}
/** - ---> baryon density */
dy[pv->index_pt_delta_b] = -(theta_b+metric_continuity);
/** - ---> baryon velocity (depends on tight-coupling approximation=tca) */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/* without tca */
/** - ----> perturbed recombination has an impact **/
dy[pv->index_pt_theta_b] =
- a_prime_over_a*theta_b
+ metric_euler
+ k2*cb2*(delta_b+delta_temp)
+ R*pvecthermo[pth->index_th_dkappa]*(theta_g-theta_b);
}
else {
/* with tca */
class_call(perturb_tca_slip_and_shear(y,pppaw,error_message),
error_message,
error_message);
/* perturbed recombination has an impact **/
dy[pv->index_pt_theta_b] =
(-a_prime_over_a*theta_b
+k2*(cb2*(delta_b+delta_temp)+R*(delta_g/4.-s2_squared*ppw->tca_shear_g))
+R*ppw->tca_slip)/(1.+R)
+metric_euler;
}
/** - ---> photon temperature higher momenta and photon polarization (depend on tight-coupling approximation) */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
/** - ----> if photon tight-coupling is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/** - -----> define \f$ \Pi = G_{\gamma 0} + G_{\gamma 2} + F_{\gamma 2} \f$ */
P0 = (y[pv->index_pt_pol0_g] + y[pv->index_pt_pol2_g] + 2.*s_l[2]*y[pv->index_pt_shear_g])/8.;
/** - -----> photon temperature velocity */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s2_squared*y[pv->index_pt_shear_g])
+ metric_euler
+ pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g);
/** - -----> photon temperature shear */
dy[pv->index_pt_shear_g] =
0.5*(8./15.*(theta_g+metric_shear)
-3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_l3_g]
-pvecthermo[pth->index_th_dkappa]*(2.*y[pv->index_pt_shear_g]-4./5./s_l[2]*P0));
/** - -----> photon temperature l=3 */
l = 3;
dy[pv->index_pt_l3_g] = k/(2.0*l+1.0)*
(l*s_l[l]*2.*s_l[2]*y[pv->index_pt_shear_g]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_g+1])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];
/** - -----> photon temperature l>3 */
for (l = 4; l < pv->l_max_g; l++) {
dy[pv->index_pt_delta_g+l] = k/(2.0*l+1.0)*
(l*s_l[l]*y[pv->index_pt_delta_g+l-1]-(l+1)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
}
/** - -----> photon temperature lmax */
l = pv->l_max_g; /* l=lmax */
dy[pv->index_pt_delta_g+l] =
k*(s_l[l]*y[pv->index_pt_delta_g+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/** - -----> photon polarization l=0 */
dy[pv->index_pt_pol0_g] =
-k*y[pv->index_pt_pol0_g+1]
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-4.*P0);
/** - -----> photon polarization l=1 */
dy[pv->index_pt_pol1_g] =
k/3.*(y[pv->index_pt_pol1_g-1]-2.*s_l[2]*y[pv->index_pt_pol1_g+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol1_g];
/** - -----> photon polarization l=2 */
dy[pv->index_pt_pol2_g] =
k/5.*(2.*s_l[2]*y[pv->index_pt_pol2_g-1]-3.*s_l[3]*y[pv->index_pt_pol2_g+1])
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol2_g]-4./5.*P0);
/** - -----> photon polarization l>2 */
for (l=3; l < pv->l_max_pol_g; l++)
dy[pv->index_pt_pol0_g+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/** - -----> photon polarization lmax_pol */
l = pv->l_max_pol_g;
dy[pv->index_pt_pol0_g+l] =
k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1)*cotKgen*y[pv->index_pt_pol0_g+l])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
}
/** - ----> if photon tight-coupling is on: */
else {
/** - -----> in that case, only need photon velocity */
/* perturbed recombination has an impact **/
dy[pv->index_pt_theta_g] =
-(dy[pv->index_pt_theta_b]+a_prime_over_a*theta_b-cb2*k2*(delta_b+delta_temp))/R
+k2*(0.25*delta_g-s2_squared*ppw->tca_shear_g)+(1.+R)/R*metric_euler;
}
}
/** - ---> cdm */
if (pba->has_cdm == _TRUE_) {
/** - ----> newtonian gauge: cdm density and velocity */
if (ppt->gauge == newtonian) {
dy[pv->index_pt_delta_cdm] = -(y[pv->index_pt_theta_cdm]+metric_continuity); /* cdm density */
dy[pv->index_pt_theta_cdm] = - a_prime_over_a*y[pv->index_pt_theta_cdm] + metric_euler; /* cdm velocity */
}
/** - ----> synchronous gauge: cdm density only (velocity set to zero by definition of the gauge) */
if (ppt->gauge == synchronous) {
dy[pv->index_pt_delta_cdm] = -metric_continuity; /* cdm density */
}
}
/* perturbed recombination */
/* computes the derivatives of delta x_e and delta T_b */
if((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca] == (int)tca_off)){
// alpha * n_H is in inverse seconds, so we have to multiply it by Mpc_in_sec
dy[ppw->pv->index_pt_perturbed_recombination_delta_chi] = - alpha_rec* a * chi*n_H *(delta_alpha_rec + delta_chi + delta_b) * _Mpc_over_m_ / _c_ ;
// see the documentation for this formula
dy[ppw->pv->index_pt_perturbed_recombination_delta_temp] = 2./3. * dy[ppw->pv->index_pt_delta_b] - a * Compton_CR * pow(pba->T_cmb/a, 4) * chi / (1.+chi+fHe) * ( (1.-pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb])*(delta_g + delta_chi*(1.+fHe)/(1.+chi+fHe)) + pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb] *(delta_temp - 1./4. * delta_g) );
}
/** - ---> dcdm and dr */
if (pba->has_dcdm == _TRUE_) {
/** - ----> dcdm */
dy[pv->index_pt_delta_dcdm] = -(y[pv->index_pt_theta_dcdm]+metric_continuity)
- a * pba->Gamma_dcdm / k2 * metric_euler; /* dcdm density */
dy[pv->index_pt_theta_dcdm] = - a_prime_over_a*y[pv->index_pt_theta_dcdm] + metric_euler; /* dcdm velocity */
}
/** - ---> dr */
if ((pba->has_dcdm == _TRUE_)&&(pba->has_dr == _TRUE_)) {
/* f = rho_dr*a^4/rho_crit_today. In CLASS density units
rho_crit_today = H0^2.
*/
f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
fprime_dr = pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]*pow(a,5)/pow(pba->H0,2);
/** - ----> dr F0 */
dy[pv->index_pt_F0_dr] = -k*y[pv->index_pt_F0_dr+1]-4./3.*metric_continuity*f_dr+
fprime_dr*(y[pv->index_pt_delta_dcdm]+metric_euler/k2);
/** - ----> dr F1 */
dy[pv->index_pt_F0_dr+1] = k/3.*y[pv->index_pt_F0_dr]-2./3.*k*y[pv->index_pt_F0_dr+2]*s2_squared +
4*metric_euler/(3.*k)*f_dr + fprime_dr/k*y[pv->index_pt_theta_dcdm];
/** - ----> exact dr F2 */
dy[pv->index_pt_F0_dr+2] = 8./15.*(3./4.*k*y[pv->index_pt_F0_dr+1]+metric_shear*f_dr) -3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_F0_dr+3];
/** - ----> exact dr l=3 */
l = 3;
dy[pv->index_pt_F0_dr+3] = k/(2.*l+1.)*
(l*s_l[l]*s_l[2]*y[pv->index_pt_F0_dr+2]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+4]);
/** - ----> exact dr l>3 */
for (l = 4; l < pv->l_max_dr; l++) {
dy[pv->index_pt_F0_dr+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_F0_dr+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+l+1]);
}
/** - ----> exact dr lmax_dr */
l = pv->l_max_dr;
dy[pv->index_pt_F0_dr+l] =
k*(s_l[l]*y[pv->index_pt_F0_dr+l-1]-(1.+l)*cotKgen*y[pv->index_pt_F0_dr+l]);
}
/** - ---> fluid (fld) */
if (pba->has_fld == _TRUE_) {
if (pba->use_ppf == _FALSE_){
/** - ----> factors w, w_prime, adiabatic sound speed ca2 (all three background-related),
plus actual sound speed in the fluid rest frame cs2 */
class_call(background_w_fld(pba,a,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
w_prime_fld = dw_over_da_fld * a_prime_over_a * a;
ca2 = w_fld - w_prime_fld / 3. / (1.+w_fld) / a_prime_over_a;
cs2 = pba->cs2_fld;
/** - ----> fluid density */
dy[pv->index_pt_delta_fld] =
-(1+w_fld)*(y[pv->index_pt_theta_fld]+metric_continuity)
-3.*(cs2-w_fld)*a_prime_over_a*y[pv->index_pt_delta_fld]
-9.*(1+w_fld)*(cs2-ca2)*a_prime_over_a*a_prime_over_a*y[pv->index_pt_theta_fld]/k2;
/** - ----> fluid velocity */
dy[pv->index_pt_theta_fld] = /* fluid velocity */
-(1.-3.*cs2)*a_prime_over_a*y[pv->index_pt_theta_fld]
+cs2*k2/(1.+w_fld)*y[pv->index_pt_delta_fld]
+metric_euler;
}
else {
dy[pv->index_pt_Gamma_fld] = ppw->Gamma_prime_fld; /* Gamma variable of PPF formalism */
}
}
/** - ---> scalar field (scf) */
if (pba->has_scf == _TRUE_) {
/** - ----> field value */
dy[pv->index_pt_phi_scf] = y[pv->index_pt_phi_prime_scf];
/** - ----> Klein Gordon equation */
dy[pv->index_pt_phi_prime_scf] = - 2.*a_prime_over_a*y[pv->index_pt_phi_prime_scf]
- metric_continuity*pvecback[pba->index_bg_phi_prime_scf] // metric_continuity = h'/2
- (k2 + a2*pvecback[pba->index_bg_ddV_scf])*y[pv->index_pt_phi_scf]; //checked
}
/** - ---> ultra-relativistic neutrino/relics (ur) */
if (pba->has_ur == _TRUE_) {
/** - ----> if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
/** - -----> ur density */
dy[pv->index_pt_delta_ur] =
// standard term
-4./3.*(y[pv->index_pt_theta_ur] + metric_continuity)
// non-standard term, non-zero if if ceff2_ur not 1/3
+(1.-ppt->three_ceff2_ur)*a_prime_over_a*(y[pv->index_pt_delta_ur] + 4.*a_prime_over_a*y[pv->index_pt_theta_ur]/k/k);
/** - -----> ur velocity */
dy[pv->index_pt_theta_ur] =
// standard term with extra coefficient (3 ceff2_ur), normally equal to one
k2*(ppt->three_ceff2_ur*y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]) + metric_euler
// non-standard term, non-zero if ceff2_ur not 1/3
-(1.-ppt->three_ceff2_ur)*a_prime_over_a*y[pv->index_pt_theta_ur];
if(ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
/** - -----> exact ur shear */
dy[pv->index_pt_shear_ur] =
0.5*(
// standard term
8./15.*(y[pv->index_pt_theta_ur]+metric_shear)-3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1]
// non-standard term, non-zero if cvis2_ur not 1/3
-(1.-ppt->three_cvis2_ur)*(8./15.*(y[pv->index_pt_theta_ur]+metric_shear)));
/** - -----> exact ur l=3 */
l = 3;
dy[pv->index_pt_l3_ur] = k/(2.*l+1.)*
(l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]);
/** - -----> exact ur l>3 */
for (l = 4; l < pv->l_max_ur; l++) {
dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]);
}
/** - -----> exact ur lmax_ur */
l = pv->l_max_ur;
dy[pv->index_pt_delta_ur+l] =
k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]);
}
else {
/** - -----> in fluid approximation (ufa): only ur shear needed */
//TBC: curvature?
/* a la Ma & Bertschinger */
if (ppr->ur_fluid_approximation == ufa_mb) {
dy[pv->index_pt_shear_ur] =
-3./tau*y[pv->index_pt_shear_ur]
+2./3.*(y[pv->index_pt_theta_ur]+metric_shear);
}
/* a la Hu */
if (ppr->ur_fluid_approximation == ufa_hu) {
dy[pv->index_pt_shear_ur] =
-3.*a_prime_over_a*y[pv->index_pt_shear_ur]
+2./3.*(y[pv->index_pt_theta_ur]+metric_shear);
}
/* a la CLASS */
if (ppr->ur_fluid_approximation == ufa_CLASS) {
dy[pv->index_pt_shear_ur] =
-3./tau*y[pv->index_pt_shear_ur]
+2./3.*(y[pv->index_pt_theta_ur]+metric_ufa_class);
}
}
}
}
/** - ---> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. */
//TBC: curvature in all ncdm
if (pba->has_ncdm == _TRUE_) {
idx = pv->index_pt_psi0_ncdm1;
/** - ----> first case: use a fluid approximation (ncdmfa) */
//TBC: curvature
if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on) {
/** - -----> loop over species */
for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {
/** - -----> define intermediate quantitites */
rho_ncdm_bg = pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; /* background density */
p_ncdm_bg = pvecback[pba->index_bg_p_ncdm1+n_ncdm]; /* background pressure */
pseudo_p_ncdm = pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm]; /* pseudo-pressure (see CLASS IV paper) */
w_ncdm = p_ncdm_bg/rho_ncdm_bg; /* equation of state parameter */
ca2_ncdm = w_ncdm/3.0/(1.0+w_ncdm)*(5.0-pseudo_p_ncdm/p_ncdm_bg); /* adiabatic sound speed */
/* c_eff is (delta p / delta rho) in the gauge under
consideration (not in the gauge comoving with the
fluid) */
/* c_vis is introduced in order to close the system */
/* different ansatz for sound speed c_eff and viscosity speed c_vis */
if (ppr->ncdm_fluid_approximation == ncdmfa_mb) {
ceff2_ncdm = ca2_ncdm;
cvis2_ncdm = 3.*w_ncdm*ca2_ncdm;
}
if (ppr->ncdm_fluid_approximation == ncdmfa_hu) {
ceff2_ncdm = ca2_ncdm;
cvis2_ncdm = w_ncdm;
}
if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) {
ceff2_ncdm = ca2_ncdm;
cvis2_ncdm = 3.*w_ncdm*ca2_ncdm;
}
/** - -----> exact continuity equation */
dy[idx] = -(1.0+w_ncdm)*(y[idx+1]+metric_continuity)-
3.0*a_prime_over_a*(ceff2_ncdm-w_ncdm)*y[idx];
/** - -----> exact euler equation */
dy[idx+1] = -a_prime_over_a*(1.0-3.0*ca2_ncdm)*y[idx+1]+
ceff2_ncdm/(1.0+w_ncdm)*k2*y[idx]-k2*y[idx+2]
+ metric_euler;
/** - -----> different ansatz for approximate shear derivative */
if (ppr->ncdm_fluid_approximation == ncdmfa_mb) {
dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2]
+8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear);
}
if (ppr->ncdm_fluid_approximation == ncdmfa_hu) {
dy[idx+2] = -3.0*a_prime_over_a*ca2_ncdm/w_ncdm*y[idx+2]
+8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear);
}
if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) {
dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2]
+8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_ufa_class);
}
/** - -----> jump to next species */
idx += pv->l_max_ncdm[n_ncdm]+1;
}
}
/** - ----> second case: use exact equation (Boltzmann hierarchy on momentum grid) */
else {
/** - -----> loop over species */
for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {
/** - -----> loop over momentum */
for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) {
/** - -----> define intermediate quantities */
dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
q = pba->q_ncdm[n_ncdm][index_q];
epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
qk_div_epsilon = k*q/epsilon;
/** - -----> ncdm density for given momentum bin */
dy[idx] = -qk_div_epsilon*y[idx+1]+metric_continuity*dlnf0_dlnq/3.;
/** - -----> ncdm velocity for given momentum bin */
dy[idx+1] = qk_div_epsilon/3.0*(y[idx] - 2*s_l[2]*y[idx+2])
-epsilon*metric_euler/(3*q*k)*dlnf0_dlnq;
/** - -----> ncdm shear for given momentum bin */
dy[idx+2] = qk_div_epsilon/5.0*(2*s_l[2]*y[idx+1]-3.*s_l[3]*y[idx+3])
-s_l[2]*metric_shear*2./15.*dlnf0_dlnq;
/** - -----> ncdm l>3 for given momentum bin */
for(l=3; l<pv->l_max_ncdm[n_ncdm]; l++){
dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]);
}
/** - -----> ncdm lmax for given momentum bin (truncation as in Ma and Bertschinger)
but with curvature taken into account a la arXiv:1305.3261 */
dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l];
/** - -----> jump to next momentum bin or species */
idx += (pv->l_max_ncdm[n_ncdm]+1);
}
}
}
}
/** - ---> metric */
/** - ---> eta of synchronous gauge */
if (ppt->gauge == synchronous) {
dy[pv->index_pt_eta] = pvecmetric[ppw->index_mt_eta_prime];
}
if (ppt->gauge == newtonian) {
dy[pv->index_pt_phi] = pvecmetric[ppw->index_mt_phi_prime];
}
}
/** - vector mode */
if (_vectors_) {
fprintf(stderr,"we are in vectors\n");
ssqrt3 = sqrt(1.-2.*pba->K/k2);
cb2 = pvecthermo[pth->index_th_cb2];
/** - --> baryon velocity */
if (ppt->gauge == synchronous) {
dy[pv->index_pt_theta_b] = -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b]
- pvecthermo[pth->index_th_dkappa]*(_SQRT2_/4.*delta_g + y[pv->index_pt_theta_b]);
}
else if (ppt->gauge == newtonian) {
dy[pv->index_pt_theta_b] = -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b]
- _SQRT2_/4.*pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b])
+ pvecmetric[ppw->index_mt_V_prime]+(1.-3.*cb2)*a_prime_over_a*y[pv->index_pt_V];
}
/*
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) {
*/
/* short-cut notations for the tensor perturbations */
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
shear_g = y[pv->index_pt_shear_g];
/* (P^{(1)}) (see Eq. B.23 in 1305.3261)*/
P1 = -_SQRT6_/40.*(
4./(3.*k)*theta_g //F1
+y[pv->index_pt_delta_g+3]
+2.*y[pv->index_pt_pol0_g]
+10./7.*y[pv->index_pt_pol2_g]
-4./7.*y[pv->index_pt_pol0_g+4]);
if (ppt->gauge == synchronous) {
/* photon density (delta_g = F_0) */
dy[pv->index_pt_delta_g] =
-4./3.*theta_g
-pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b]);
/* photon velocity (theta_g = (3k/4)*F_1) */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s_l[2]*shear_g)
-pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1)
+4.0/(3.0*_SQRT2_)*ssqrt3*y[pv->index_pt_hv_prime];
}
else if (ppt->gauge == newtonian) {
/* photon density (delta_g = F_0) */
dy[pv->index_pt_delta_g] =
-4./3.*theta_g
-pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b])
-2.*_SQRT2_*pvecmetric[ppw->index_mt_V_prime];
/* photon velocity (theta_g = (3k/4)*F_1) */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s_l[2]*shear_g)
-pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1);
}
/* photon shear (shear_g = F_2/2) */
dy[pv->index_pt_shear_g] =
4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1]
-pvecthermo[pth->index_th_dkappa]*shear_g;
/* photon l=3 */
dy[pv->index_pt_l3_g] =
k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=4; l < pv->l_max_g; l++)
dy[pv->index_pt_delta_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* l=lmax */
l = pv->l_max_g;
dy[pv->index_pt_delta_g+l] =
k*(s_l[l]*y[pv->index_pt_delta_g+l-1]
-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* photon polarization, l=0 (pol0_g = G_0)*/
dy[pv->index_pt_pol0_g] =
-k*y[pv->index_pt_pol0_g+1]
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P1);
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=1; l < pv->l_max_pol_g; l++)
dy[pv->index_pt_pol0_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/* l=lmax */
l = pv->l_max_pol_g;
dy[pv->index_pt_pol0_g+l] =
k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/*
}
}
*/
if (ppt->gauge == synchronous) {
/* Vector metric perturbation in synchronous gauge: */
dy[pv->index_pt_hv_prime] = pvecmetric[ppw->index_mt_hv_prime_prime];
}
else if (ppt->gauge == newtonian){
/* Vector metric perturbation in Newtonian gauge: */
dy[pv->index_pt_V] = pvecmetric[ppw->index_mt_V_prime];
}
}
/** - tensor modes: */
if (_tensors_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) {
/* short-cut notations for the tensor perturbations */
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
shear_g = y[pv->index_pt_shear_g];
/* (P^{(2)}) */
P2 =-1.0/_SQRT6_*(
1./10.*delta_g
+2./7.*shear_g
+3./70.*y[pv->index_pt_delta_g+4]
-3./5.*y[pv->index_pt_pol0_g]
+6./7.*y[pv->index_pt_pol2_g]
-3./70.*y[pv->index_pt_pol0_g+4]);
/* above expression from paper, expression below matches old class but is not correct
P2 = -1.0/_SQRT6_*(
1./10.*delta_g
+2./35.*shear_g
+1./210.*y[pv->index_pt_delta_g+4]
-3./5.*y[pv->index_pt_pol0_g]
+6./35.*y[pv->index_pt_pol2_g]
-1./210.*y[pv->index_pt_pol0_g+4]
);
*/
/* photon density (delta_g = F_0) */
dy[pv->index_pt_delta_g] =
-4./3.*theta_g
-pvecthermo[pth->index_th_dkappa]*(delta_g+_SQRT6_*P2)
//+y[pv->index_pt_gwdot];
+_SQRT6_*y[pv->index_pt_gwdot]; //TBC
/* photon velocity (theta_g = (3k/4)*F_1) */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s_l[2]*shear_g)
-pvecthermo[pth->index_th_dkappa]*theta_g;
/* photon shear (shear_g = F_2/2) */
dy[pv->index_pt_shear_g] =
4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1]
-pvecthermo[pth->index_th_dkappa]*shear_g;
/* photon l=3 */
dy[pv->index_pt_l3_g] =
k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=4; l < pv->l_max_g; l++)
dy[pv->index_pt_delta_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* l=lmax */
l = pv->l_max_g;
dy[pv->index_pt_delta_g+l] =
k*(s_l[l]*y[pv->index_pt_delta_g+l-1]
-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* photon polarization, l=0 (pol0_g = G_0)*/
dy[pv->index_pt_pol0_g] =
-k*y[pv->index_pt_pol0_g+1]
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P2);
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=1; l < pv->l_max_pol_g; l++)
dy[pv->index_pt_pol0_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/* l=lmax */
l = pv->l_max_pol_g;
dy[pv->index_pt_pol0_g+l] =
k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
}
}
if (ppt->evolve_tensor_ur == _TRUE_) {
dy[pv->index_pt_delta_ur] = -4./3.*y[pv->index_pt_theta_ur]+_SQRT6_*y[pv->index_pt_gwdot];
dy[pv->index_pt_theta_ur] = k2*(y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]);
dy[pv->index_pt_shear_ur] = (4./15.*y[pv->index_pt_theta_ur]
-3./10.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1]);
l = 3;
dy[pv->index_pt_l3_ur] = k/(2.*l+1.)*
(l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]);
for (l = 4; l < pv->l_max_ur; l++) {
dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]);
}
l = pv->l_max_ur;
dy[pv->index_pt_delta_ur+l] =
k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]);
}
/** - --> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. */
//TBC: curvature in all ncdm
if (ppt->evolve_tensor_ncdm == _TRUE_) {
idx = pv->index_pt_psi0_ncdm1;
/** - ---> loop over species */
for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {
/** - ----> loop over momentum */
for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) {
/** - ----> define intermediate quantities */
dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
q = pba->q_ncdm[n_ncdm][index_q];
epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
qk_div_epsilon = k*q/epsilon;
/** - ----> ncdm density for given momentum bin */
dy[idx] = -qk_div_epsilon*y[idx+1]-0.25*_SQRT6_*y[pv->index_pt_gwdot]*dlnf0_dlnq;
/** - ----> ncdm l>0 for given momentum bin */
for(l=1; l<pv->l_max_ncdm[n_ncdm]; l++){
dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]);
}
/** - ----> ncdm lmax for given momentum bin (truncation as in Ma and Bertschinger)
but with curvature taken into account a la arXiv:1305.3261 */
dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l];
/** - ----> jump to next momentum bin or species */
idx += (pv->l_max_ncdm[n_ncdm]+1);
}
}
}
/** - --> tensor metric perturbation h (gravitational waves) */
dy[pv->index_pt_gw] = y[pv->index_pt_gwdot];
/** - --> its time-derivative */
dy[pv->index_pt_gwdot] = pvecmetric[ppw->index_mt_gw_prime_prime];
}
return _SUCCESS_;
}
/**
 * Compute the tight-coupling approximation (TCA) quantities for the
 * baryon-photon fluid: the "slip" (the difference of derivatives
 * theta_b' - theta_g') and the photon shear, at the order requested by
 * ppr->tight_coupling_approximation. Results are stored in
 * ppw->tca_slip and ppw->tca_shear_g (nothing is written to dy here).
 *
 * @param y                         Input: current vector of perturbations
 * @param parameters_and_workspace  Input: pointer to a struct perturb_parameters_and_workspace
 *                                  (passed as void* to match the generic evolver interface)
 * @param error_message             Output: error message written on failure
 * @return the error status (_SUCCESS_ on success)
 */
int perturb_tca_slip_and_shear(double * y,
void * parameters_and_workspace,
ErrorMsg error_message
) {
/** Summary: */
/** - define local variables */
/* scale factor and other background quantities */
double a,a_prime_over_a,a_primeprime_over_a,R;
/* useful terms for tight-coupling approximation */
double slip=0.;
double tau_c=0.,dtau_c=0.;
double theta_prime,shear_g_prime=0.,theta_prime_prime;
double g0,g0_prime,g0_prime_prime;
double F=0.,F_prime=0.,F_prime_prime=0.;
/* short-cut names for the fields of the input structure */
struct perturb_parameters_and_workspace * pppaw;
double k,k2;
struct precision * ppr;
struct background * pba;
struct thermo * pth;
struct perturbs * ppt;
struct perturb_workspace * ppw;
double * pvecback;
double * pvecthermo;
double * pvecmetric;
struct perturb_vector * pv;
/* short-cut notations for the perturbations */
double delta_g=0.,theta_g=0.,shear_g=0.;
double delta_b,theta_b;
/* Delta: combination inferred from h'' via the Einstein equations
   (used only by the second_order_CRS scheme below) */
double Delta;
double cb2;
double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_shear_prime=0.;
/* perturbed recombination */
double delta_temp=0.;
/* for use with curvature */
double s2_squared;
/** - rename the fields of the input structure (just to avoid heavy notations) */
pppaw = parameters_and_workspace;
k = pppaw->k;
k2=k*k;
ppr = pppaw->ppr;
pba = pppaw->pba;
pth = pppaw->pth;
ppt = pppaw->ppt;
ppw = pppaw->ppw;
pvecback = ppw->pvecback;
pvecthermo = ppw->pvecthermo;
pvecmetric = ppw->pvecmetric;
pv = ppw->pv;
/** - compute related background quantities */
a = pvecback[pba->index_bg_a];
a_prime_over_a = pvecback[pba->index_bg_H] * a;
/* a''/a = a H' + 2 (a'/a)^2 in conformal time */
a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * a + 2. * a_prime_over_a * a_prime_over_a;
//z = pba->a_today-1.;
/* R = (4/3) rho_gamma / rho_b, the photon-to-baryon density ratio
   entering all baryon-photon coupling terms below */
R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b];
/* curvature correction factor (equals 1 in a flat universe, K=0) */
s2_squared = 1.-3.*pba->K/k2;
/** - --> (a) define short-cut notations for the scalar perturbations */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
}
delta_b = y[pv->index_pt_delta_b];
theta_b = y[pv->index_pt_theta_b];
cb2 = pvecthermo[pth->index_th_cb2];
/* perturbed recombination */
if ((ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){
delta_temp = y[pv->index_pt_perturbed_recombination_delta_temp];
}
/** - --> (b) define short-cut notations used only in tight-coupling approximation */
tau_c = 1./pvecthermo[pth->index_th_dkappa]; /* inverse of opacity */
dtau_c = -pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c; /* its first derivative wrt conformal time */
F = tau_c/(1+R); /* F = tau_c/(1+R) */
if (ppr->tight_coupling_approximation >= (int)second_order_CLASS) {
F_prime = dtau_c/(1+R)+tau_c*a_prime_over_a*R/(1+R)/(1+R); /*F' needed by second_order_CLASS and compromise_CLASS */
if (ppr->tight_coupling_approximation == (int)second_order_CLASS) {
F_prime_prime =(- pvecthermo[pth->index_th_dddkappa]*tau_c*tau_c /* F'' needed by second_order_CLASS only */
+ 2.*pvecthermo[pth->index_th_ddkappa]*pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c*tau_c)/(1+R)
+2.*dtau_c*a_prime_over_a*R/(1+R)/(1+R)
+tau_c*((a_primeprime_over_a-2.*a_prime_over_a*a_prime_over_a)+2.*a_prime_over_a*a_prime_over_a*R/(1+R))*R/(1+R)/(1+R);
}
}
/** - --> (c) compute metric-related quantities (depending on gauge; additional gauges can be coded below)
- Each continuity equation contains a term in (theta+metric_continuity) with
metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge
- Each Euler equation contains a source term metric_euler with
metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge
- Each shear derivative equation contains a source term metric_shear equal to
metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge
- metric_shear_prime is the derivative of metric_shear
- In the ufa_class approximation, the leading-order source term is (h_prime/2) in synchronous gauge,
(-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate the later by (-6 phi_prime) */
if (ppt->gauge == synchronous) {
metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.;
metric_euler = 0.;
metric_shear = k2 * pvecmetric[ppw->index_mt_alpha];
metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime];
}
if (ppt->gauge == newtonian) {
metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime];
metric_euler = k2*pvecmetric[ppw->index_mt_psi];
metric_shear = 0.;
metric_shear_prime = 0.;
}
/** - --> (d) if some approximation schemes are turned on, enforce a few y[ ] values computed in perturb_einstein */
/* free-streaming photon velocity */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)
theta_g = ppw->rsa_theta_g;
/** - ---> like Ma & Bertschinger */
if (ppr->tight_coupling_approximation == (int)first_order_MB) {
slip=2.*R/(1.+R)*a_prime_over_a*(theta_b-theta_g)
+F*(-a_primeprime_over_a*theta_b
+k2*(-a_prime_over_a*delta_g/2.
+cb2*(-theta_b-metric_continuity)
-4./3.*(-theta_g-metric_continuity)/4.)
-a_prime_over_a*metric_euler);
}
/** - ---> relax assumption dkappa~a\f$^{-2}\f$ (like in CAMB) */
if ((ppr->tight_coupling_approximation == (int)first_order_CAMB) || (ppr->tight_coupling_approximation == (int)compromise_CLASS)) {
/* same as first_order_MB except the prefactor of (theta_b-theta_g),
   which here keeps the exact dtau_c/tau_c instead of 2 a'/a */
slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g)
+F*(-a_primeprime_over_a*theta_b
+k2*(-a_prime_over_a*delta_g/2.
+cb2*(-theta_b-metric_continuity)
-4./3.*(-theta_g-metric_continuity)/4.)
-a_prime_over_a*metric_euler);
}
/** - ---> also relax assumption cb2~a\f$^{-1}\f$ */
if ((ppr->tight_coupling_approximation == (int)first_order_CLASS) || (ppr->tight_coupling_approximation == (int)second_order_CLASS)){
/* adds the dcb2*delta_b term, i.e. keeps the exact time variation
   of the baryon sound speed */
slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g)
+F*(-a_primeprime_over_a*theta_b
+k2*(-a_prime_over_a*delta_g/2.
+pvecthermo[pth->index_th_dcb2]*delta_b
+cb2*(-theta_b-metric_continuity)
-4./3.*(-theta_g-metric_continuity)/4.)
-a_prime_over_a*metric_euler);
}
/** - ---> intermediate quantities for 2nd order tca: shear_g at first order in tight-coupling */
shear_g=16./45.*tau_c*(theta_g+metric_shear);
/* (Ma & Bertschinger give (1/9)*(4/3) instead of (2/15)*(4/3)
because they didn't include the contribution of G_gamma0
and G_gamma2, which are of the same order as sigma_g. This
was already consistently included in CAMB) */
/** - ---> intermediate quantities for 2nd order tca: zero order for theta_b' = theta_g' */
/** - ----> perturbed recombination has an impact **/
theta_prime = (-a_prime_over_a*theta_b+k2*(cb2*(delta_b+delta_temp)+R/4.*delta_g))/(1.+R) + metric_euler;
/** - ---> intermediate quantities for 2nd order tca: shear_g_prime at first order in tight-coupling */
shear_g_prime=16./45.*(tau_c*(theta_prime+metric_shear_prime)+dtau_c*(theta_g+metric_shear));
/** - ---> 2nd order as in CRS*/
if (ppr->tight_coupling_approximation == (int)second_order_CRS) {
/* this scheme is implemented only in synchronous gauge and for a
   flat universe; other configurations are rejected below */
if (ppt->gauge == newtonian) {
class_stop(error_message,
"the second_order_CRS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme");
}
if (ppt->gauge == synchronous) {
class_test(pba->sgnK != 0,
ppt->error_message,
"the second_order_CRS approach to tight-coupling is coded in the flat case only: for non-flat try another tight-coupling scheme");
/* infer Delta from h'' using Einstein equation */
Delta = 2*k2*y[pv->index_pt_eta]
-2*a_prime_over_a*pvecmetric[ppw->index_mt_h_prime]
-pvecmetric[ppw->index_mt_h_prime_prime];
/* monster expression for slip at second-order in tight-coupling */
slip=(-2./(1.+R)*a_prime_over_a-pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa])*(theta_b-theta_g)
+(-a_primeprime_over_a*theta_b
-k2*a_prime_over_a*(delta_g/2.-2.*shear_g)
+k2*(cb2*(-theta_b-metric_continuity)
-4./3.*(-theta_g-metric_continuity)/4.
+shear_g_prime)
)/pvecthermo[pth->index_th_dkappa]/(1.+R)
-2.*R*(3.*a_prime_over_a*a_prime_over_a*cb2+(1.+R)*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)-3.*a_prime_over_a*a_prime_over_a)
/(1.+R)/(1.+R)/(1.+R)*(theta_b-theta_g)/pvecthermo[pth->index_th_dkappa]
+(
a_primeprime_over_a*a_prime_over_a*((2.-3.*cb2)*R-2.)*theta_b/(1.+R)
+a_prime_over_a*k2*(1.-3.*cb2)*theta_b/3./(1.+R)
/* perturbed recombination has an impact (next two lines) */
+a_primeprime_over_a*k2*cb2*(delta_b+delta_temp)/(1.+R)
+k2*k2*(3.*cb2-1.)*cb2*(delta_b+delta_temp)/3./(1.+R)
+k2*k2*R*(3.*cb2-1.)*delta_g/12./(1.+R)
+a_primeprime_over_a*k2*(2.+3.*R)*delta_g/4./(1.+R)
+a_prime_over_a*a_prime_over_a*k2*((2.-3.*cb2)*R-1.)*delta_g/2./(1.+R)
+a_prime_over_a*k2*cb2*(1.+(3.*cb2-2.)*R)*(-theta_b-metric_continuity)/(1.+R)
+a_prime_over_a*k2*(2.+(5.-3.*cb2)*R)*4./3.*(-theta_g-metric_continuity)/4./(1.+R)
+a_prime_over_a*(1.-3.*cb2)*k2*2.*metric_shear/3.
+k2*k2*(3.*cb2-1.)*y[pv->index_pt_eta]/3.
+2.*a_prime_over_a*k2*(3.*cb2-1.)*pvecmetric[ppw->index_mt_eta_prime]
+k2*(1.-3.*cb2)*Delta/6.
)/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/(1.+R)/(1.+R)
-(4.*a_primeprime_over_a*theta_b-4.*k2*cb2*(-theta_b-metric_continuity)+2.*a_prime_over_a*k2*delta_g+k2*4./3.*(-theta_g-metric_continuity))/2./(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]
+4.*a_prime_over_a*R/(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g);
/* second-order correction to shear */
shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+k2*pvecmetric[ppw->index_mt_alpha_prime]);
}
}
/** - ---> 2nd order like in CLASS paper */
if (ppr->tight_coupling_approximation == (int)second_order_CLASS) {
/* this scheme is implemented in synchronous gauge only */
if (ppt->gauge == newtonian) {
class_stop(error_message,
"the second_order_CLASS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme");
}
if (ppt->gauge == synchronous) {
/* zero order for theta_b'' = theta_g'' */
theta_prime_prime = ((R-1.)*a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b
+k2*(pvecthermo[pth->index_th_dcb2]*delta_b+cb2*(-theta_b-metric_continuity)-a_prime_over_a*R/4.*delta_g+R/4.*4./3.*(-theta_g-metric_continuity)))/(1.+R);
/* zero-order quantities g0, g0', go'' */
g0 = -a_prime_over_a*theta_b + k2*(cb2*delta_b-delta_g/4.);
g0_prime = -a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b+k2*(pvecthermo[pth->index_th_dcb2]*delta_b+(1./3.-cb2)*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime]));
g0_prime_prime = -a_prime_over_a*theta_prime_prime-2.*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_prime
-(2.*a_prime_over_a*a_prime_over_a*a_prime_over_a-3.*a_primeprime_over_a*a_prime_over_a)*theta_b
+k2*(pvecthermo[pth->index_th_ddcb2]*delta_b-2.*pvecthermo[pth->index_th_dcb2]*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime])+(1./3.-cb2)*(theta_prime+0.5*pvecmetric[ppw->index_mt_h_prime_prime]));
/* slip at second order: note that it builds on the first-order
   slip already stored in `slip` by the first_order_CLASS branch above */
slip = (1.-2*a_prime_over_a*F)*slip + F*k2*s2_squared*(2.*a_prime_over_a*shear_g+shear_g_prime)
-F*(F_prime_prime*g0+2.*F_prime*g0_prime+F*g0_prime_prime);
/* second-order correction to shear */
shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime);
}
}
/** - ---> add only the most important 2nd order terms */
if (ppr->tight_coupling_approximation == (int)compromise_CLASS) {
/* slip at second order (only leading second-order terms) */
slip = (1.-2.*a_prime_over_a*F)*slip + F*k2*(2.*a_prime_over_a*s2_squared*shear_g+s2_squared*shear_g_prime-(1./3.-cb2)*(F*theta_prime+2.*F_prime*theta_b));
/* second-order correction to shear */
shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime);
}
/** - ---> store tight-coupling values of photon shear and its derivative */
ppw->tca_shear_g = shear_g;
ppw->tca_slip = slip;
return _SUCCESS_;
}
int perturb_rsa_delta_and_theta(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
double k,
double * y,
double a_prime_over_a,
double * pvecthermo,
struct perturb_workspace * ppw
) {
/* - define local variables */
double k2;
k2 = k*k;
// formulas below TBC for curvaturema
/* newtonian gauge */
if (ppt->gauge == newtonian) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
if (ppr->radiation_streaming_approximation == rsa_null) {
ppw->rsa_delta_g = 0.;
ppw->rsa_theta_g = 0.;
}
else {
ppw->rsa_delta_g = -4.*y[ppw->pv->index_pt_phi];
ppw->rsa_theta_g = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime];
}
if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) {
ppw->rsa_delta_g +=
-4./k2*ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_b];
ppw->rsa_theta_g +=
3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]*y[ppw->pv->index_pt_theta_b]
+ppw->pvecthermo[pth->index_th_dkappa]*
(-a_prime_over_a*y[ppw->pv->index_pt_theta_b]
+ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b]
+k2*y[ppw->pv->index_pt_phi]));
}
if (pba->has_ur == _TRUE_) {
if (ppr->radiation_streaming_approximation == rsa_null) {
ppw->rsa_delta_ur = 0.;
ppw->rsa_theta_ur = 0.;
}
else {
ppw->rsa_delta_ur = -4.*y[ppw->pv->index_pt_phi];
ppw->rsa_theta_ur = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime];
}
}
}
}
/* synchronous gauge */
if (ppt->gauge == synchronous) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
if (ppr->radiation_streaming_approximation == rsa_null) {
ppw->rsa_delta_g = 0.;
ppw->rsa_theta_g = 0.;
}
else {
ppw->rsa_delta_g = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
-k2*y[ppw->pv->index_pt_eta]);
ppw->rsa_theta_g = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime];
}
if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) {
ppw->rsa_delta_g +=
-4./k2*ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_b]+0.5*ppw->pvecmetric[ppw->index_mt_h_prime]);
ppw->rsa_theta_g +=
3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]*
(y[ppw->pv->index_pt_theta_b]
+0.5*ppw->pvecmetric[ppw->index_mt_h_prime])
+ppw->pvecthermo[pth->index_th_dkappa]*
(-a_prime_over_a*y[ppw->pv->index_pt_theta_b]
+ ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b]
-a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
+k2*y[ppw->pv->index_pt_eta]));
}
if (pba->has_ur == _TRUE_) {
if (ppr->radiation_streaming_approximation == rsa_null) {
ppw->rsa_delta_ur = 0.;
ppw->rsa_theta_ur = 0.;
}
else {
ppw->rsa_delta_ur = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
-k2*y[ppw->pv->index_pt_eta]);
ppw->rsa_theta_ur = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime];
}
}
}
}
return _SUCCESS_;
}
|
sillyGPU.c | /*
Tempo sequencial
real 0m9,743s
user 0m9,674s
sys 0m0,030s
real 0m9,695s
user 0m9,673s
sys 0m0,004s
real 0m9,692s
user 0m9,669s
sys 0m0,011s
real 0m9,649s
user 0m9,631s
sys 0m0,004s
real 0m9,694s
user 0m9,682s
sys 0m0,000s
Tempo paralelo - multicore
real 0m2,720s
user 0m15,143s
sys 0m0,056s
real 0m2,822s
user 0m15,280s
sys 0m0,040s
real 0m2,624s
user 0m15,217s
sys 0m0,044s
real 0m2,800s
user 0m15,143s
sys 0m0,056s
real 0m2,680s
user 0m15,143s
sys 0m0,056s
Tempo paralelo - GPU
real 0m2,360s
user 0m14,952s
sys 0m0,033s
real 0m2,465s
user 0m14,981s
sys 0m0,037s
real 0m2,300s
user 0m15,118s
sys 0m0,046s
real 0m2,279s
user 0m15,029s
sys 0m0,030s
real 0m2,381s
user 0m15,603s
sys 0m0,091s
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main()
{
  int i, j, n = 100000;

  /* Input, rank and output arrays, zero-initialized. */
  int *in = (int*) calloc(n, sizeof(int));
  int *pos = (int*) calloc(n, sizeof(int));
  int *out = (int*) calloc(n, sizeof(int));
  if (in == NULL || pos == NULL || out == NULL)
  {
    fprintf(stderr, "allocation failed\n");
    return EXIT_FAILURE;
  }

  /* Fill input with n..1 on the device; 'in' is produced here, so map(from:). */
  #pragma omp target map(from:in[0:n])
  #pragma omp teams distribute parallel for simd
  for(i=0; i < n; i++)
    in[i] = n-i;
  // Print input array
  // for(i=0; i < n; i++)
  //   printf("%d ",in[i]);

  /* Rank sort: pos[i] counts the elements smaller than in[i].
     NOTE: collapse(2) was removed -- with the (i,j) space flattened, two
     threads could update the same pos[i] concurrently (data race).
     Parallelizing only the outer loop gives each thread exclusive
     ownership of its pos[i]; j must then be private explicitly. */
  #pragma omp parallel for schedule(guided) private(j)
  for(i=0; i < n; i++)
    for(j=0; j < n; j++)
      if(in[i] > in[j])
        pos[i]++;

  /* Scatter: 'in' and 'pos' are inputs (map(to:)) and 'out' is the result
     (map(from:)).  The original had these directions swapped, so the
     permutation computed on the device never reached the host. */
  #pragma omp target map(to:in[0:n],pos[0:n]) map(from:out[0:n])
  #pragma omp teams distribute parallel for simd
  for(i=0; i < n; i++)
    out[pos[i]] = in[i];
  // print output array
  // for(i=0; i < n; i++)
  //   printf("%d ",out[i]);

  /* Verify that out is exactly 1..n; report failure via the exit status
     (the original exited with 0 even on failure). */
  #pragma omp parallel for schedule(guided)
  for(i=0; i < n; i++)
    if(i+1 != out[i])
    {
      printf("test failed\n");
      exit(EXIT_FAILURE);
    }
  printf("test passed\n");

  free(in);
  free(pos);
  free(out);
  return 0;
}
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declarations.
*/
/*
  Per-channel compression scheme, as encoded in the PSD file.
*/
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
/*
  Color mode of the PSD document (the header's "mode" field); note the
  gap: values 5 and 6 are not defined here.
*/
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
/*
  One channel record of a layer: the channel id ('type', interpreted by
  SetPSDPixel: 0/1/2 select red/green/blue, -1 alpha) and the size in
  bytes of the channel's pixel data.
*/
typedef struct _ChannelInfo
{
short
type;
size_t
size;
} ChannelInfo;
/*
  Layer-mask data: the decoded mask image, its placement on the canvas,
  and the raw background/flag bytes read from the file.
*/
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
/*
  One parsed PSD layer: its channel records, 4-byte blend-mode key,
  decoded image, optional opacity mask, placement, flag bytes,
  Pascal-style name and trailing additional-information blob.
*/
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];
char
blendkey[4];
Image
*image;
MaskInfo
mask;
Quantum
opacity;
RectangleInfo
page;
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[256],
visible;
unsigned short
channels;
StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD stream begins with the 4-byte signature "8BPS".
  */
  if (length < 4)
    return(MagickFalse);
  if (LocaleNCompare((const char *) magick,"8BPS",4) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map an ImageMagick composite operator to the 4-byte PSD blend-mode key.
  On little-endian images the key is emitted byte-reversed (e.g. "vidi"
  instead of "idiv"); unknown operators fall back to normal blending.
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
switch (image->compose)
{
case ColorBurnCompositeOp:
return(image->endian == LSBEndian ? "vidi" : "idiv");
case ColorDodgeCompositeOp:
return(image->endian == LSBEndian ? " vid" : "div ");
case ColorizeCompositeOp:
return(image->endian == LSBEndian ? "rloc" : "colr");
case DarkenCompositeOp:
return(image->endian == LSBEndian ? "krad" : "dark");
case DifferenceCompositeOp:
return(image->endian == LSBEndian ? "ffid" : "diff");
case DissolveCompositeOp:
return(image->endian == LSBEndian ? "ssid" : "diss");
case ExclusionCompositeOp:
return(image->endian == LSBEndian ? "dums" : "smud");
case HardLightCompositeOp:
return(image->endian == LSBEndian ? "tiLh" : "hLit");
case HardMixCompositeOp:
return(image->endian == LSBEndian ? "xiMh" : "hMix");
case HueCompositeOp:
return(image->endian == LSBEndian ? " euh" : "hue ");
case LightenCompositeOp:
return(image->endian == LSBEndian ? "etil" : "lite");
case LinearBurnCompositeOp:
return(image->endian == LSBEndian ? "nrbl" : "lbrn");
case LinearDodgeCompositeOp:
return(image->endian == LSBEndian ? "gddl" : "lddg");
case LinearLightCompositeOp:
return(image->endian == LSBEndian ? "tiLl" : "lLit");
case LuminizeCompositeOp:
return(image->endian == LSBEndian ? " mul" : "lum ");
case MultiplyCompositeOp:
return(image->endian == LSBEndian ? " lum" : "mul ");
case OverlayCompositeOp:
return(image->endian == LSBEndian ? "revo" : "over");
case PinLightCompositeOp:
return(image->endian == LSBEndian ? "tiLp" : "pLit");
case SaturateCompositeOp:
return(image->endian == LSBEndian ? " tas" : "sat ");
case ScreenCompositeOp:
return(image->endian == LSBEndian ? "nrcs" : "scrn");
case SoftLightCompositeOp:
return(image->endian == LSBEndian ? "tiLs" : "sLit");
case VividLightCompositeOp:
return(image->endian == LSBEndian ? "tiLv" : "vLit");
/* any operator without a PSD equivalent is written as normal blending */
case OverCompositeOp:
default:
return(image->endian == LSBEndian ? "mron" : "norm");
}
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
/*
  Undo Photoshop's blend-with-white of semi-transparent sRGB pixels, row
  by row.  Returns MagickTrue when nothing needs doing (non-blended
  alpha, non-sRGB colorspace, or the user disabled it via the
  "psd:alpha-unblend" option); otherwise the per-row status.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image,ExceptionInfo* exception)
{
const char
*option;
MagickBooleanType
status;
ssize_t
y;
if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace)
return(MagickTrue);
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringFalse(option) != MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
/* a failure in any row makes the remaining iterations no-ops */
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
register ssize_t
i;
gamma=QuantumScale*GetPixelAlpha(image, q);
/* fully transparent and fully opaque pixels are left untouched */
if (gamma != 0.0 && gamma != 1.0)
{
/* invert "c' = c*gamma + (1-gamma)*white" on every non-alpha channel */
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
if (channel != AlphaPixelChannel)
q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
}
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  /*
    Translate a PSD compression code into the corresponding ImageMagick
    CompressionType: both zip variants collapse to ZipCompression and
    anything unrecognized is treated as uncompressed.
  */
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
  Fold a layer's global opacity into every pixel's alpha (revert ==
  MagickFalse), or divide it back out (revert != MagickFalse).  A fully
  opaque layer is a no-op; an image without blended alpha is first given
  an opaque alpha channel.  Returns the per-row status.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
if (opacity == OpaqueAlpha)
return(MagickTrue);
if (image->alpha_trait != BlendPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
/* a failure in any row makes the remaining iterations no-ops */
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
/* alpha *= opacity/QuantumRange, or the inverse when reverting
   (division is guarded against a zero opacity) */
if (revert == MagickFalse)
SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
opacity),q);
else if (opacity > 0)
SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
(MagickRealType) opacity)),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/*
  Multiply the image's alpha by a layer mask (revert == MagickFalse), or
  divide the mask back out (revert != MagickFalse).  The mask is first
  composited onto a canvas-sized clone filled with the mask's background
  value so that areas outside the mask's page rectangle are covered.
  Returns the per-row status.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
Image
*complete_mask;
MagickBooleanType
status;
PixelInfo
color;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying opacity mask");
complete_mask=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (complete_mask == (Image *) NULL)
return(MagickFalse);
complete_mask->alpha_trait=BlendPixelTrait;
/* fill the canvas with the mask's background value before compositing */
GetPixelInfo(complete_mask,&color);
color.red=background;
SetImageColor(complete_mask,&color,exception);
/* place the mask at its page offset relative to the layer */
status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
if (status == MagickFalse)
{
complete_mask=DestroyImage(complete_mask);
return(status);
}
image->alpha_trait=BlendPixelTrait;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register Quantum
*p;
register ssize_t
x;
/* a failure in any row makes the remaining iterations no-ops */
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
alpha,
intensity;
alpha=GetPixelAlpha(image,q);
intensity=GetPixelIntensity(complete_mask,p);
/* alpha *= intensity/QuantumRange, or the inverse when reverting
   (division guarded against a zero mask intensity) */
if (revert == MagickFalse)
SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
else if (intensity > 0)
SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
q+=GetPixelChannels(image);
p+=GetPixelChannels(complete_mask);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
complete_mask=DestroyImage(complete_mask);
return(status);
}
/*
  Stash the layer's opacity-mask image in the image registry under a
  random key and record that key in the "psd:opacity-mask" artifact so
  the writer can recover the mask later.  The mask's page offset is
  rebased onto the layer's page before it is registered.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    Bug fix: indices 8 and 9 of the key are written below, so the buffer
    must hold at least 10 bytes.  The previous request of 2+1 bytes made
    those stores a heap buffer overflow.
    NOTE(review): assumes GetRandomKey(n) yields at least n data bytes,
    and that the random bytes are safe to use as a C-string registry key
    -- confirm against the MagickCore random_ API.
  */
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=layer_info->mask.background;
  key[9]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Expand a single source byte into 8/4/2/1 output samples depending on
  depth (1-bit samples invert: a set bit becomes 0, a clear bit 255).
  Returns the updated sample count, or -1 when the expansion would
  overrun number_pixels -- in that case nothing is written and the
  output cursor is left untouched.
*/
static ssize_t ExpandPSDByte(const int value,const ssize_t depth,
  const ssize_t produced,const size_t number_pixels,unsigned char **cursor)
{
  unsigned char
    *q;

  q=(*cursor);
  switch (depth)
  {
    case 1:
    {
      ssize_t
        bit;

      if ((produced+8) > (ssize_t) number_pixels)
        return(-1);
      for (bit=7; bit >= 0; bit--)
        *q++=((value >> bit) & 0x01) ? 0U : 255U;
      *cursor=q;
      return(produced+8);
    }
    case 2:
    {
      if ((produced+4) > (ssize_t) number_pixels)
        return(-1);
      *q++=(unsigned char) ((value >> 6) & 0x03);
      *q++=(unsigned char) ((value >> 4) & 0x03);
      *q++=(unsigned char) ((value >> 2) & 0x03);
      *q++=(unsigned char) (value & 0x03);
      *cursor=q;
      return(produced+4);
    }
    case 4:
    {
      if ((produced+2) > (ssize_t) number_pixels)
        return(-1);
      *q++=(unsigned char) ((value >> 4) & 0xff);
      *q++=(unsigned char) (value & 0x0f);
      *cursor=q;
      return(produced+2);
    }
    default:
    {
      if ((produced+1) > (ssize_t) number_pixels)
        return(-1);
      *q++=(unsigned char) value;
      *cursor=q;
      return(produced+1);
    }
  }
}

/*
  Decode PackBits-style RLE: a control byte > 128 replicates the next
  byte (257-control) times, a control byte < 128 copies (control+1)
  literal bytes, and 128 is a no-op.  Each decoded byte is expanded per
  'depth' by ExpandPSDByte.  Returns the number of samples produced;
  decoding stops early when either buffer is exhausted.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
  ssize_t
    j,
    next,
    produced,
    remaining;

  size_t
    run;

  produced=0;
  remaining=(ssize_t) number_compact_pixels;
  /* quirk preserved from the original: a final unpaired control byte
     (remaining == 1) is never consumed */
  while ((remaining > 1) && (produced < (ssize_t) number_pixels))
  {
    remaining--;
    run=(size_t) (*compact_pixels++);
    if (run == 128)
      continue;
    if (run > 128)
      {
        int
          value;

        /* replicate the next byte (257-run) times */
        run=256-run+1;
        if (remaining == 0)
          return(produced);
        remaining--;
        value=(*compact_pixels++);
        for (j=0; j < (ssize_t) run; j++)
        {
          next=ExpandPSDByte(value,depth,produced,number_pixels,&pixels);
          if (next < 0)
            return(produced);
          produced=next;
        }
        continue;
      }
    /* literal run: copy the next (run+1) source bytes */
    run++;
    for (j=0; j < (ssize_t) run; j++)
    {
      if (remaining == 0)
        return(produced);
      remaining--;
      next=ExpandPSDByte((int) (*compact_pixels),depth,produced,number_pixels,
        &pixels);
      compact_pixels++;
      if (next < 0)
        return(produced);
      produced=next;
    }
  }
  return(produced);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    n;

  /*
    Release every per-layer resource (decoded image, mask image and
    additional-info blob), then the layer array itself; returns the
    RelinquishMagickMemory result for convenient pointer reset.
  */
  for (n=0; n < number_layers; n++)
  {
    LayerInfo
      *layer;

    layer=layer_info+n;
    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline size_t GetPSDPacketSize(const Image *image)
{
  /*
    Bytes per stored sample: colormapped images with more than 256
    entries need 16-bit indexes; otherwise the size follows the image
    depth (>16 bits -> 4 bytes, >8 bits -> 2 bytes, else 1).
  */
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  if (image->depth > 8)
    return(2);
  return(1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  /* version-1 files store 32-bit section lengths, later versions 64-bit */
  return(psd_info->version == 1 ? (MagickSizeType) ReadBlobLong(image) :
    (MagickSizeType) ReadBlobLongLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    packet_size;

  /*
    Bytes needed to hold one decoded row: 1-bit images pack eight pixels
    per byte (rounded up); all others store one packet per pixel.
  */
  packet_size=GetPSDPacketSize(image);
  if (image->depth == 1)
    return(packet_size*((image->columns+7)/8));
  return(packet_size*image->columns);
}
/*
  Human-readable name of a PSD color mode (used for logging).
*/
static const char *ModeToString(PSDImageType type)
{
switch (type)
{
case BitmapMode: return "Bitmap";
case GrayscaleMode: return "Grayscale";
case IndexedMode: return "Indexed";
case RGBMode: return "RGB";
case CMYKMode: return "CMYK";
case MultichannelMode: return "Multichannel";
case DuotoneMode: return "Duotone";
case LabMode: return "L*A*B";
default: return "unknown";
}
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    saved_mask;

  MagickBooleanType
    status;

  /*
    Negate every channel except alpha, restoring the image's channel
    mask afterwards.
  */
  saved_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_mask);
  return(status);
}
/*
  Walk the 8BIM image-resource blocks: extract the resolution (resource
  0x03ed) and the "has merged image" flag (resource 0x0421), and return
  the raw blocks wrapped as an "8bim" profile (NULL on short input or
  allocation failure).  The caller owns the returned StringInfo.
*/
static StringInfo *ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image,ExceptionInfo *exception)
{
  const unsigned char
    *p;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  /* bug fix: the allocation was dereferenced unchecked */
  if (profile == (StringInfo *) NULL)
    return((StringInfo *) NULL);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    /* the Pascal name is padded so length byte + name total an even count */
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    if ((p+count) > (blocks+length))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MagickPathExtent];

        unsigned short
          resolution;

        /*
          Resolution info.
          NOTE(review): only the first 16 bytes are consumed here; any
          remaining resource bytes rely on the 8BIM signature check above
          to resynchronize -- confirm against the PSD specification.
        */
        if (count < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.x);
        (void) SetImageProperty(image,"tiff:XResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.y);
        (void) SetImageProperty(image,"tiff:YResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* NOTE(review): reads *(p+4) under a count > 3 guard, which
           looks off-by-one relative to the guard -- confirm */
        if ((count > 3) && (*(p+4) == 0))
          *has_merged_image=MagickFalse;
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* resource data is padded to an even byte count */
    if ((count & 0x01) != 0)
      p++;
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
/*
  Reverse a length-delimited PSD key/string in place.  PSD stores these
  in big-endian byte order, so nothing needs doing on MSB machines.
*/
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  char
    *q;

  if (image->endian == MSBEndian)
    return;
  q=p+length;
  for (--q; p < q; ++p, --q)
  {
    /* plain temporary swap; the previous XOR/comma-expression swap was
       needlessly obscure (and would zero a byte if p and q ever aliased) */
    char swap = *p;
    *p=(*q);
    *q=swap;
  }
}
/*
  Store one decoded sample into the pixel at q.  'type' selects the
  destination channel: 0/1/2 (with -2/-3/-4 as aliases) map to
  red/green/blue, -1 is alpha, 3 is black for CMYK images (otherwise
  alpha), and 4 is alpha except for sRGB-compatible images with more
  than three channels.  Colormapped images treat the sample as a
  colormap index (type 0) or a colormap alpha value.
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
ExceptionInfo *exception)
{
if (image->storage_class == PseudoClass)
{
PixelInfo
*color;
/* type 0 carries the colormap index, scaled to the index width */
if (type == 0)
{
if (packet_size == 1)
SetPixelIndex(image,ScaleQuantumToChar(pixel),q);
else
SetPixelIndex(image,ScaleQuantumToShort(pixel),q);
}
color=image->colormap+(ssize_t) ConstrainColormapIndex(image,
GetPixelIndex(image,q),exception);
/* with multiple channels the alpha arrives in a later call; a
   lone channel's sample doubles as the colormap alpha */
if ((type == 0) && (channels > 1))
return;
else
color->alpha=(MagickRealType) pixel;
SetPixelViaPixelInfo(image,color,q);
return;
}
switch (type)
{
case -1:
{
SetPixelAlpha(image,pixel,q);
break;
}
case -2:
case 0:
{
SetPixelRed(image,pixel,q);
break;
}
case -3:
case 1:
{
SetPixelGreen(image,pixel,q);
break;
}
case -4:
case 2:
{
SetPixelBlue(image,pixel,q);
break;
}
case 3:
{
if (image->colorspace == CMYKColorspace)
SetPixelBlack(image,pixel,q);
else
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
case 4:
{
/* channel 4 is ignored for sRGB-compatible images that already
   carry more than three color channels */
if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
(channels > 3))
break;
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
}
}
/*
  Push one decoded row of channel samples from 'pixels' into row 'row'
  of the image.  packet_size (1/2/4) selects byte, big-endian short or
  big-endian float samples; for 1-bit images every source byte expands
  into up to eight pixels (a set bit yields 0, a clear bit QuantumRange).
  Returns the result of syncing the pixel cache.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const size_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
register const unsigned char
*p;
register Quantum
*q;
register ssize_t
x;
size_t
packet_size;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (Quantum *) NULL)
return MagickFalse;
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else if (packet_size == 2)
{
unsigned short
nibble;
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
else
{
/* 32-bit samples are stored as big-endian floats in [0,1] */
MagickFloatType
nibble;
p=PushFloatPixel(MSBEndian,p,&nibble);
pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
q+=GetPixelChannels(image);
}
else
{
/* 1-bit: expand up to eight pixels from this byte, clipped at the
   row end */
ssize_t
bit,
number_bits;
number_bits=image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit = 0; bit < number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
q+=GetPixelChannels(image);
x++;
}
/* the bit loop already advanced x once per emitted pixel; step back
   one so the outer for-loop's x++ does not double-count (unless the
   row ended exactly on this byte) */
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
/*
  Read an uncompressed channel: one row of raw bytes at a time from the
  blob, pushed into the image via ReadPSDChannelPixels.  Returns
  MagickFalse on a short read or a pixel-cache failure.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    count,
    row_size;

  ssize_t
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* cleanup: the original set status=MagickFalse at the top of every
       iteration and again on the short-read branch; the value was always
       overwritten before use, so the redundant assignment is gone */
    count=ReadBlob(image,row_size,pixels);
    if (count != row_size)
    {
      status=MagickFalse;
      break;
    }
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    i;

  /*
    Read the per-row compact-data byte counts that precede an RLE
    channel: 16-bit values for version-1 files, 32-bit otherwise.
    Returns NULL when the table cannot be allocated.
  */
  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return(sizes);
  for (i=0; i < (ssize_t) size; i++)
  {
    if (psd_info->version == 1)
      sizes[i]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[i]=(MagickOffsetType) ReadBlobLong(image);
  }
  return(sizes);
}
/*
  Read an RLE (PackBits) compressed channel: for each row, read sizes[y]
  compact bytes from the blob, expand them with DecodePSDPixels and push
  the samples into the image.  Returns MagickFalse on a short read, a
  malformed row, or a pixel-cache failure.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* size the compact buffer for the longest row */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+512)) /* arbitrary sanity margin over a full row */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  /* consistency fix: size the allocation by the object it stores
     (*compact_pixels); the previous sizeof(*pixels) only worked because
     both buffers happen to be unsigned char */
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* depth-1 rows pass 123456 so DecodePSDPixels takes its
       byte-at-a-time default path; ReadPSDChannelPixels expands the bits */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  ReadPSDChannelZip() reads one ZIP-compressed channel of `compact_size'
  bytes, inflates it into a full plane of image->rows*row_size bytes,
  optionally undoes the per-row delta prediction (ZipWithPrediction), and
  transfers each row into the image via ReadPSDChannelPixels().

  Returns MagickFalse on allocation failure, short read, a corrupt zlib
  stream, or a pixel-transfer failure.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    length,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  /* total size of the fully decompressed channel plane */
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      /* inflate until the output plane is full or the stream ends */
      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            /* corrupt stream: release all resources and fail */
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo the per-row horizontal delta prediction in place.  For
        2-byte packets the bytes form a big-endian value, so the carry
        from the low bytes is propagated into the high byte.
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          // else if (packet_size == 4)
          // {
          // TODO: Figure out what to do there.
          // }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  ReadPSDChannel() decodes the pixel data of a single layer channel.
  `channel' indexes layer_info->channel_info[]; a channel type < -1 denotes
  a layer mask, which is decoded into a separate grayscale image stored in
  layer_info->mask.image.  The blob position is always advanced past the
  channel payload on exit, whether decoding succeeded or not.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
          (IsStringTrue(option) == MagickFalse)))
        {
          /* skip the payload; size includes the already-consumed 2-byte
             compression field */
          SeekBlob(image,layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          SetImageType(mask,GrayscaleType,exception);
          /* decode into the mask image instead of the layer image */
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      /* RLE stores one compressed-row size per image row up front */
      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* reposition past this channel regardless of the decode outcome */
  SeekBlob(image,offset+layer_info->channel_info[channel].size-2,SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
/*
  ReadPSDLayer() decodes one complete layer: it configures the layer image
  (background color, compose operator derived from the blend key,
  psd:layer.* artifacts, label property), reads every channel's pixel data,
  then applies layer opacity, CMYK negation and the opacity mask.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* each channel payload starts with its own 2-byte compression type */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    /* TODO: Remove this when we figure out how to support this */
    if ((compression == ZipWithPrediction) && (image->depth == 32))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
        return(MagickFalse);
      }
    layer_info->image->compression=ConvertPSDCompression(compression);
    /* channel type -1 is the layer's transparency (alpha) channel */
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j,
      compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  CheckPSDChannels() verifies that a layer supplies every color channel the
  image mode requires.  A bitmask of required channels is built from
  psd_info->min_channels and each channel the layer provides clears its
  bit; alpha (-1) is tracked separately and mask channels (< -1) are
  ignored.  Returns MagickTrue when all required channels are present.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    required;

  register ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  required=RedChannel;
  if (psd_info->min_channels >= 3)
    required|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    required|=BlackChannel;
  for (i=0; i < layer_info->channels; i++)
  {
    short
      id;

    id=layer_info->channel_info[i].type;
    if (id == -1)
      {
        required|=AlphaChannel;
        continue;
      }
    if (id < -1)
      continue;  /* layer/user masks are not color channels */
    switch (id)
    {
      case 0: required&=~RedChannel; break;
      case 1: required&=~GreenChannel; break;
      case 2: required&=~BlueChannel; break;
      case 3: required&=~BlackChannel; break;
      default: break;
    }
  }
  if (required == 0)
    return(MagickTrue);
  /* alpha alone is acceptable when an extra channel is present */
  if ((required == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  ReadPSDLayersInternal() parses the layer & mask information block: the
  layer count, each layer record (bounding box, channel table, blend key,
  opacity/clipping/flags, mask record, blending ranges, name, additional
  info), then the channel pixel data for every layer, and finally links the
  decoded layer images into the image list.

  Returns MagickTrue on success or when the block is absent or skipped
  (skip_layers, 16/32-bit compatibility sections without Lr16/Lr32).

  Fix: a non-zero layer-mask record length smaller than its 18-byte fixed
  portion previously underflowed the unsigned `length-18' skip below,
  producing a near-2^64 DiscardBlobBytes() request on corrupt input; such
  files are now rejected as ImproperImageHeader.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    j,
    number_layers;

  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks.
      */
      (void) ReadBlobLong(image);
      count=ReadBlob(image,4,(unsigned char *) type);
      ReversePSDString(image,type,4);
      status=MagickFalse;
      if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(MagickTrue);
      else
        {
          /* 16/32-bit files keep layers in an Lr16/Lr32 sub-block */
          count=ReadBlob(image,4,(unsigned char *) type);
          ReversePSDString(image,type,4);
          if ((count != 0) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
              (LocaleNCompare(type,"Lr32",4) == 0)))
            size=GetPSDSize(psd_info,image);
          else
            return(MagickTrue);
        }
    }
  status=MagickTrue;
  if (size != 0)
    {
      layer_info=(LayerInfo *) NULL;
      number_layers=(short) ReadBlobShort(image);
      if (number_layers < 0)
        {
          /*
            The first alpha channel in the merged result contains the
            transparency data for the merged result.
          */
          number_layers=MagickAbsoluteValue(number_layers);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " negative layer count corrected for");
          image->alpha_trait=BlendPixelTrait;
        }
      /*
        We only need to know if the image has an alpha channel
      */
      if (skip_layers != MagickFalse)
        return(MagickTrue);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image contains %.20g layers",(double) number_layers);
      if (number_layers == 0)
        ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
          image->filename);
      layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
        sizeof(*layer_info));
      if (layer_info == (LayerInfo *) NULL)
        {
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " allocation of LayerInfo failed");
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      (void) memset(layer_info,0,(size_t) number_layers*
        sizeof(*layer_info));
      for (i=0; i < number_layers; i++)
      {
        ssize_t
          x,
          y;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " reading layer #%.20g",(double) i+1);
        /* bounding rectangle is stored top, left, bottom, right */
        layer_info[i].page.y=ReadBlobSignedLong(image);
        layer_info[i].page.x=ReadBlobSignedLong(image);
        y=ReadBlobSignedLong(image);
        x=ReadBlobSignedLong(image);
        layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
        layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
        layer_info[i].channels=ReadBlobShort(image);
        if (layer_info[i].channels > MaxPSDChannels)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
              image->filename);
          }
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
            (double) layer_info[i].page.x,(double) layer_info[i].page.y,
            (double) layer_info[i].page.height,(double)
            layer_info[i].page.width,(double) layer_info[i].channels);
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
          if ((layer_info[i].channel_info[j].type < -4) ||
              (layer_info[i].channel_info[j].type > 4))
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
                image->filename);
            }
          layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
            image);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
              (double) layer_info[i].channel_info[j].type,
              (double) layer_info[i].channel_info[j].size);
        }
        if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        count=ReadBlob(image,4,(unsigned char *) type);
        ReversePSDString(image,type,4);
        if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer type was %.4s instead of 8BIM", type);
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
        ReversePSDString(image,layer_info[i].blendkey,4);
        layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
          ReadBlobByte(image));
        layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
        layer_info[i].flags=(unsigned char) ReadBlobByte(image);
        /* flag bit 0x02 means the layer is hidden */
        layer_info[i].visible=!(layer_info[i].flags & 0x02);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
            layer_info[i].blendkey,(double) layer_info[i].opacity,
            layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
            layer_info[i].visible ? "true" : "false");
        (void) ReadBlobByte(image); /* filler */
        size=ReadBlobLong(image);
        if (size != 0)
          {
            MagickSizeType
              combined_length,
              length;

            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer contains additional info");
            length=ReadBlobLong(image);
            combined_length=length+4;
            if (length != 0)
              {
                /*
                  Layer mask info.  The fixed portion read below is 18
                  bytes (4 signed longs + 2 bytes); a smaller non-zero
                  length would make the unsigned `length-18' skip wrap
                  around, so reject it as corrupt.
                */
                if (length < 18)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "ImproperImageHeader",image->filename);
                  }
                layer_info[i].mask.page.y=ReadBlobSignedLong(image);
                layer_info[i].mask.page.x=ReadBlobSignedLong(image);
                layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-
                  layer_info[i].mask.page.y);
                layer_info[i].mask.page.width=(size_t) (ReadBlobSignedLong(image)-
                  layer_info[i].mask.page.x);
                layer_info[i].mask.background=(unsigned char) ReadBlobByte(
                  image);
                layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
                if (!(layer_info[i].mask.flags & 0x01))
                  {
                    /* mask position is relative to the layer */
                    layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                      layer_info[i].page.y;
                    layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                      layer_info[i].page.x;
                  }
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                    (double) layer_info[i].mask.page.x,(double)
                    layer_info[i].mask.page.y,(double)
                    layer_info[i].mask.page.width,(double)
                    layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                    length)-18);
                /*
                  Skip over the rest of the layer mask information.
                */
                if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            length=ReadBlobLong(image);
            combined_length+=length+4;
            if (length != 0)
              {
                /*
                  Layer blending ranges info.
                */
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer blending ranges: length=%.20g",(double)
                    ((MagickOffsetType) length));
                if (DiscardBlobBytes(image,length) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            /*
              Layer name.
            */
            length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
            combined_length+=length+1;
            if (length > 0)
              (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
            layer_info[i].name[length]='\0';
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer name: %s",layer_info[i].name);
            if ((length % 4) != 0)
              {
                length=4-(length % 4);
                combined_length+=length;
                /* Skip over the padding of the layer name */
                if (DiscardBlobBytes(image,length) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            /* whatever remains of the record is "additional info" */
            length=(MagickSizeType) size-combined_length;
            if (length > 0)
              {
                unsigned char
                  *info;

                if (length > GetBlobSize(image))
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "InsufficientImageDataInFile",image->filename);
                  }
                layer_info[i].info=AcquireStringInfo((const size_t) length);
                info=GetStringInfoDatum(layer_info[i].info);
                (void) ReadBlob(image,(const size_t) length,info);
              }
          }
      }
      for (i=0; i < number_layers; i++)
      {
        if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer data is empty");
            if (layer_info[i].info != (StringInfo *) NULL)
              layer_info[i].info=DestroyStringInfo(layer_info[i].info);
            continue;
          }
        /*
          Allocate layered image.
        */
        layer_info[i].image=CloneImage(image,layer_info[i].page.width,
          layer_info[i].page.height,MagickFalse,exception);
        if (layer_info[i].image == (Image *) NULL)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " allocation of image for layer %.20g failed",(double) i);
            ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
              image->filename);
          }
        if (layer_info[i].info != (StringInfo *) NULL)
          {
            (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
              layer_info[i].info,exception);
            layer_info[i].info=DestroyStringInfo(layer_info[i].info);
          }
      }
      if (image_info->ping == MagickFalse)
        {
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                /* empty layer: skip its channel data */
                for (j=0; j < layer_info[i].channels; j++)
                {
                  if (DiscardBlobBytes(image,(MagickSizeType)
                      layer_info[i].channel_info[j].size) == MagickFalse)
                    {
                      layer_info=DestroyLayerInfo(layer_info,number_layers);
                      ThrowBinaryException(CorruptImageError,
                        "UnexpectedEndOfFile",image->filename);
                    }
                }
                continue;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " reading data for layer %.20g",(double) i);
            status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
              exception);
            if (status == MagickFalse)
              break;
            status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType)
              number_layers);
            if (status == MagickFalse)
              break;
          }
        }
      if (status != MagickFalse)
        {
          /* compact out the empty layers */
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                for (j=i; j < number_layers - 1; j++)
                  layer_info[j] = layer_info[j+1];
                number_layers--;
                i--;
              }
          }
          if (number_layers > 0)
            {
              /* link the layer images into a doubly-linked list */
              for (i=0; i < number_layers; i++)
              {
                if (i > 0)
                  layer_info[i].image->previous=layer_info[i-1].image;
                if (i < (number_layers-1))
                  layer_info[i].image->next=layer_info[i+1].image;
                layer_info[i].image->page=layer_info[i].page;
              }
              image->next=layer_info[0].image;
              layer_info[0].image->previous=image;
            }
          layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
        }
      else
        layer_info=DestroyLayerInfo(layer_info,number_layers);
    }
  return(status);
}
/*
  ReadPSDLayers() is the public entry point for layer decoding: it honors
  the coder security policy (silently succeeding when PSD reads are not
  authorized) and then reads all layers (skip_layers=MagickFalse).
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
/*
  ReadPSDMergedImage() reads the flattened composite stored at the end of
  the file for compatibility with old readers.  Only Raw and RLE
  compression are handled here; for RLE the per-row compressed sizes of all
  channels are stored up front.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* a 2-channel image is gray + alpha */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,i,psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
/*
  ReadPSDImage() reads an Adobe Photoshop image file and returns it as an
  image list (first image = composite or first layer).  It parses, in
  order: the file header, the colormap section, the image resource blocks,
  the layer & mask block, and finally the precombined (merged) layer.  A
  NULL return indicates failure; exception holds the reason.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    has_merged_image,
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.  Version 1 is classic PSD, version 2 is PSB.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  if ((count == 0) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* classic PSD is limited to 30000x30000 pixels */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /* min_channels is the number of color channels the mode requires */
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    SetImageColorspace(image,LabColorspace,exception);
  if (psd_info.mode == CMYKMode)
    {
      psd_info.min_channels=4;
      SetImageColorspace(image,CMYKColorspace,exception);
      if (psd_info.channels > 4)
        SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
    }
  else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
      (psd_info.mode == DuotoneMode))
    {
      if (psd_info.depth != 32)
        {
          status=AcquireImageColormap(image,psd_info.depth < 16 ? 256 : 65536,
            exception);
          if (status == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " Image colormap allocated");
        }
      psd_info.min_channels=1;
      SetImageColorspace(image,GRAYColorspace,exception);
      if (psd_info.channels > 1)
        SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
    }
  else
    if (psd_info.channels > 3)
      SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data; the format of this data is undocumented.
            32 bits per pixel; the colormap is ignored.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap.
          */
          number_colors=length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          /* colormap is stored planar: all reds, then greens, then blues */
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      profile=ParseImageResourceBlocks(image,blocks,(size_t) length,
        &has_merged_image,exception);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  /* when only the composite is wanted, skip decoding the layers */
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  if ((has_merged_image != MagickFalse) || (GetImageListLength(image) == 1))
    has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
      &psd_info,exception);
  /* no usable composite and no layers read yet: retry the layer block */
  if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) &&
      (length != 0))
    {
      SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (GetImageListLength(image) == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /* synthesize the composite by flattening the layers */
      SetImageAlphaChannel(image,TransparentAlphaChannel,exception);
      image->background_color.alpha=TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      (void) SetImageProfile(image,GetStringInfoName(profile),profile,
        exception);
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  /*
    Register the large-document variant (PSB) first, then classic PSD.
    Both formats share the same reader, writer, and magic detector, and
    both require seekable streams for reading and writing.
  */
  entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=(CoderDecoderSeekableStreamFlag |
    CoderEncoderSeekableStreamFlag);
  (void) RegisterMagickInfo(entry);
  entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=(CoderDecoderSeekableStreamFlag |
    CoderEncoderSeekableStreamFlag);
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
/*
  Remove the PSB and PSD format registrations installed by
  RegisterPSDImage().
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetPSDOffset() writes one scan-line byte-count entry, sized by the PSD
  version: 16-bit big-endian for version 1 (PSD), 32-bit for version 2
  (PSB).  Returns the result of the underlying blob write.
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
const size_t offset)
{
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
/*
  WritePSDOffset() back-patches a scan-line byte-count placeholder: it
  remembers the current blob position, seeks to 'offset', writes 'size'
  (16- or 32-bit depending on the PSD version), and seeks back.  Returns
  the result of the write.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    restore_offset;

  ssize_t
    status;

  restore_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  status=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  SeekBlob(image,restore_offset,SEEK_SET);
  return(status);
}
/*
  SetPSDSize() writes a section length field: 32-bit for PSD (version 1),
  64-bit for PSB (version 2).  Returns the result of the blob write.
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
const MagickSizeType size)
{
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
/*
  WritePSDSize() back-patches a section length placeholder previously
  reserved with SetPSDSize(): it saves the current blob position, seeks to
  'offset', writes 'size' there, then restores the position.  Returns the
  result of the write.
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    restore_offset;

  ssize_t
    status;

  restore_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  status=SetPSDSize(psd_info,image,size);
  SeekBlob(image,restore_offset,SEEK_SET);
  return(status);
}
/*
  PSDPackbitsEncodeImage() compresses one scan line of 'length' bytes from
  'pixels' into 'compact_pixels' using PackBits RLE: runs of identical
  bytes become a two-byte code, non-repeating stretches are stored as a
  count byte followed by literals.  Returns the number of compacted bytes
  written, including the trailing end-of-data marker.  The caller must size
  'compact_pixels' for worst-case expansion (see AcquireCompactPixels()).
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
const unsigned char *pixels,unsigned char *compact_pixels,
ExceptionInfo *exception)
{
int
count;
register ssize_t
i,
j;
register unsigned char
*q;
unsigned char
*packbits;
/*
Compress pixels with Packbits encoding.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (unsigned char *) NULL);
assert(compact_pixels != (unsigned char *) NULL);
/* scratch buffer for one literal run: count byte + up to 127 literals */
packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
if (packbits == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
q=compact_pixels;
/* i counts the input bytes still to be consumed */
for (i=(ssize_t) length; i != 0; )
{
switch (i)
{
case 1:
{
/* single trailing byte: emit as a literal run of one */
i--;
*q++=(unsigned char) 0;
*q++=(*pixels);
break;
}
case 2:
{
/* two trailing bytes: literal run of two */
i-=2;
*q++=(unsigned char) 1;
*q++=(*pixels);
*q++=pixels[1];
break;
}
case 3:
{
i-=3;
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
/* three identical trailing bytes: packed run code for 3 repeats */
*q++=(unsigned char) ((256-3)+1);
*q++=(*pixels);
break;
}
*q++=(unsigned char) 2;
*q++=(*pixels);
*q++=pixels[1];
*q++=pixels[2];
break;
}
default:
{
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
/*
Packed run.
*/
count=3;
/* extend the run up to 127 bytes or the end of the input */
while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
{
count++;
if (count >= 127)
break;
}
i-=count;
*q++=(unsigned char) ((256-count)+1);
*q++=(*pixels);
pixels+=count;
break;
}
/*
Literal run.
*/
count=0;
/* NOTE(review): the loop condition looks ahead to pixels[count+2];
the (i-3) bound below presumably keeps this inside the row --
confirm for inputs near the end of the buffer */
while ((*(pixels+count) != *(pixels+count+1)) ||
(*(pixels+count+1) != *(pixels+count+2)))
{
packbits[count+1]=pixels[count];
count++;
if (((ssize_t) count >= (i-3)) || (count >= 127))
break;
}
i-=count;
/* packbits[0] is the literal-run code (count-1), then the literals */
*packbits=(unsigned char) (count-1);
for (j=0; j <= (ssize_t) count; j++)
*q++=packbits[j];
pixels+=count;
break;
}
}
}
*q++=(unsigned char) 128; /* EOD marker */
packbits=(unsigned char *) RelinquishMagickMemory(packbits);
return((size_t) (q-compact_pixels));
}
/*
  WriteCompressionStart() emits the two-byte compression marker that
  precedes channel data and, for RLE, reserves one zeroed scan-line
  byte-count entry per row of every channel (patched later by
  WritePSDOffset()).  Returns the number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
const Image *next_image,const CompressionType compression,
const ssize_t channels)
{
  size_t
    byte_count;

  ssize_t
    channel,
    row;

  if (compression == RLECompression)
    {
      byte_count=WriteBlobShort(image,RLE);
      /* placeholder offsets: one entry per scan line of each channel */
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          byte_count+=SetPSDOffset(psd_info,image,0);
      return(byte_count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    return(WriteBlobShort(image,ZipWithoutPrediction));
#endif
  return(WriteBlobShort(image,Raw));
}
/*
  WritePSDChannel() writes the pixel data of one channel of 'next_image' to
  the blob of 'image', encoded as Raw, RLE (PackBits), or Zip according to
  'compression'.  When 'separate' is set the channel is preceded by its own
  compression marker (as used for layer channels); otherwise the caller has
  already written a shared marker and RLE offset table.  'size_offset'
  locates the per-row byte-count entries that the RLE path back-patches.
  Returns the number of bytes written, or 0 on failure.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
const QuantumType quantum_type, unsigned char *compact_pixels,
MagickOffsetType size_offset,const MagickBooleanType separate,
const CompressionType compression,ExceptionInfo *exception)
{
int
y;
MagickBooleanType
monochrome;
QuantumInfo
*quantum_info;
register const Quantum
*p;
register ssize_t
i;
size_t
count,
length;
unsigned char
*pixels;
#ifdef MAGICKCORE_ZLIB_DELEGATE
#define CHUNK 16384
int
flush,
level;
unsigned char
*compressed_pixels;
z_stream
stream;
compressed_pixels=(unsigned char *) NULL;
flush=Z_NO_FLUSH;
#endif
count=0;
if (separate != MagickFalse)
{
/* this channel carries its own 2-byte compression marker; the RLE row
offsets (if any) start immediately after it */
size_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
}
/* samples deeper than 8 bits are written as 16-bit */
if (next_image->depth > 8)
next_image->depth=16;
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
quantum_info=AcquireQuantumInfo(image_info,next_image);
if (quantum_info == (QuantumInfo *) NULL)
return(0);
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (compression == ZipCompression)
{
compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
sizeof(*compressed_pixels));
if (compressed_pixels == (unsigned char *) NULL)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
memset(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
level=Z_DEFAULT_COMPRESSION;
/* quality 1-9 selects an explicit zlib compression level */
if ((image_info->quality > 0 && image_info->quality < 10))
level=(int) image_info->quality;
if (deflateInit(&stream,level) != Z_OK)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
}
#endif
for (y=0; y < (ssize_t) next_image->rows; y++)
{
p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
/* invert the samples of 1-bit monochrome images */
if (monochrome != MagickFalse)
for (i=0; i < (ssize_t) length; i++)
pixels[i]=(~pixels[i]);
if (compression == RLECompression)
{
length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
exception);
count+=WriteBlob(image,length,compact_pixels);
/* back-patch this row's compacted byte count into the offset table */
size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (compression == ZipCompression)
{
/* feed the exported row to deflate; finish the stream on the last row */
stream.avail_in=(uInt) length;
stream.next_in=(Bytef *) pixels;
if (y == (ssize_t) next_image->rows-1)
flush=Z_FINISH;
do {
stream.avail_out=(uInt) CHUNK;
stream.next_out=(Bytef *) compressed_pixels;
if (deflate(&stream,flush) == Z_STREAM_ERROR)
break;
length=(size_t) CHUNK-stream.avail_out;
if (length > 0)
count+=WriteBlob(image,length,compressed_pixels);
} while (stream.avail_out == 0);
}
#endif
else
count+=WriteBlob(image,length,pixels);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (compression == ZipCompression)
{
(void) deflateEnd(&stream);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
}
#endif
quantum_info=DestroyQuantumInfo(quantum_info);
return(count);
}
/*
  AcquireCompactPixels() allocates a scratch buffer large enough to hold
  one PackBits-compressed scan line of 'image' at worst-case expansion.
  On failure it records a ResourceLimitError in 'exception' and returns
  NULL; the caller owns (and must relinquish) the buffer.
*/
static unsigned char *AcquireCompactPixels(const Image *image,
ExceptionInfo *exception)
{
  size_t
    bytes_per_packet;

  unsigned char
    *buffer;

  bytes_per_packet=(size_t) (image->depth > 8UL ? 2UL : 1UL);
  buffer=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    bytes_per_packet*sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(buffer);
}
/*
  WritePSDChannels() writes every channel of 'next_image' -- the index
  channel for palette images, or gray/RGB/CMYK planes plus an optional
  alpha channel -- via WritePSDChannel(), followed by an optional opacity
  mask channel.  When 'separate' is set (layer data) each channel's byte
  count is back-patched into the layer record at 'size_offset'; otherwise
  (merged composite) a single compression marker and shared RLE offset
  table are written first and 'rows_offset' walks through it.  Returns the
  total number of bytes written, or 0 on failure.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
MagickOffsetType size_offset,const MagickBooleanType separate,
ExceptionInfo *exception)
{
CompressionType
compression;
Image
*mask;
MagickOffsetType
rows_offset;
size_t
channels,
count,
length,
offset_length;
unsigned char
*compact_pixels;
count=0;
offset_length=0;
rows_offset=0;
compact_pixels=(unsigned char *) NULL;
/* image_info->compression overrides the per-image compression setting */
compression=next_image->compression;
if (image_info->compression != UndefinedCompression)
compression=image_info->compression;
if (compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(next_image,exception);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
channels=1;
if (separate == MagickFalse)
{
if (next_image->storage_class != PseudoClass)
{
if (IsImageGray(next_image) == MagickFalse)
channels=next_image->colorspace == CMYKColorspace ? 4 : 3;
if (next_image->alpha_trait != UndefinedPixelTrait)
channels++;
}
rows_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,compression,
channels);
/* byte width of one channel's block in the shared RLE offset table */
offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
}
size_offset+=2;
if (next_image->storage_class == PseudoClass)
{
/* palette image: a single index channel */
length=WritePSDChannel(psd_info,image_info,image,next_image,
IndexQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (IsImageGray(next_image) != MagickFalse)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
GrayQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
/* CMYK samples are stored inverted; NegateCMYK() is undone below */
if (next_image->colorspace == CMYKColorspace)
(void) NegateCMYK(next_image,exception);
length=WritePSDChannel(psd_info,image_info,image,next_image,
RedQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
GreenQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlueQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
if (next_image->colorspace == CMYKColorspace)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlackQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
if (next_image->alpha_trait != UndefinedPixelTrait)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
AlphaQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
/* undo the CMYK negation applied above */
if (next_image->colorspace == CMYKColorspace)
(void) NegateCMYK(next_image,exception);
if (separate != MagickFalse)
{
const char
*property;
/* layers may carry an opacity mask registered under this artifact key */
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
exception);
if (mask != (Image *) NULL)
{
if (compression == RLECompression)
{
/* the mask may be a different size, so a fresh RLE buffer */
compact_pixels=AcquireCompactPixels(mask,exception);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
length=WritePSDChannel(psd_info,image_info,image,mask,
RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
exception);
(void) WritePSDSize(psd_info,image,length,size_offset);
count+=length;
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
}
}
}
return(count);
}
/*
  WritePascalString() writes 'value' as a Pascal string: one length byte
  followed by at most 255 characters, then zero bytes so that the total
  written (length byte included) is a multiple of 'padding'.  Returns the
  number of bytes written.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  register ssize_t
    i;

  size_t
    count,
    length;

  /*
    Max length is 255.
  */
  count=0;
  length=strlen(value);  /* computed once instead of twice */
  if (length > 255UL)
    length=255UL;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* account for the length byte itself */
  if ((length % padding) == 0)
    return(count);
  /* pad to the next multiple of 'padding' */
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}
/*
  WriteResolutionResourceBlock() writes the 8BIM resolution resource
  (id 0x03ED): horizontal and vertical resolution as 16.16 fixed-point
  values plus their display units (1 = pixels/inch, 2 = pixels/cm).
  Centimeter resolutions are converted to inches before encoding.
*/
static void WriteResolutionResourceBlock(Image *image)
{
double
x_resolution,
y_resolution;
unsigned short
units;
if (image->units == PixelsPerCentimeterResolution)
{
x_resolution=2.54*65536.0*image->resolution.x+0.5;
y_resolution=2.54*65536.0*image->resolution.y+0.5;
units=2;
}
else
{
x_resolution=65536.0*image->resolution.x+0.5;
y_resolution=65536.0*image->resolution.y+0.5;
units=1;
}
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x03ED);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,16); /* resource size */
/* NOTE(review): 0.5 was already added above, so the rounding bias is
applied twice here (and below for y) -- confirm whether the double
rounding is intentional */
(void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
(void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
(void) WriteBlobMSBShort(image,units); /* width unit */
(void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
(void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
(void) WriteBlobMSBShort(image,units); /* height unit */
}
/*
  WriteChannelSize() writes one layer-record channel entry: the signed
  channel id followed by a zeroed length placeholder that is patched later
  with WritePSDSize().  Returns the number of bytes written.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
const signed short channel)
{
  size_t
    written;

  written=(size_t) WriteBlobShort(image,channel);
  written+=SetPSDSize(psd_info,image,0);
  return(written);
}
/*
  RemoveICCProfileFromResourceBlock() scans an 8BIM resource block for the
  ICC profile resource (id 0x040F) and, if found, splices it out of
  'bim_profile' in place, shrinking the profile length accordingly.  The
  writer emits the ICC profile separately, so a stale embedded copy must
  not remain in the 8BIM data.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
/* q marks the start of this resource entry before p is advanced */
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
/* entry header: signature, id, pascal-name stub, data byte count */
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
if (id == 0x0000040f)
{
ssize_t
quantum;
/* quantum = padded data size plus the 12-byte entry header */
quantum=PSDQuantum(count)+12;
if ((quantum >= 12) && (quantum < (ssize_t) length))
{
if ((q+quantum < (datum+length-16)))
(void) memmove(q,q+quantum,length-quantum-(q-datum));
SetStringInfoLength(bim_profile,length-quantum);
}
break;
}
/* skip this entry's data, padded to an even byte count */
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
/*
  RemoveResolutionFromResourceBlock() deletes the resolution resource
  (id 0x03ED) from an 8BIM resource block in place, since the writer emits
  a freshly built resolution block via WriteResolutionResourceBlock() and
  a duplicate entry must not survive in the copied profile.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
ssize_t
cnt;
/* q marks the start of this resource entry before p is advanced */
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
return;
/* entry header: signature, id, pascal-name stub, data byte count */
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
cnt=PSDQuantum(count);
if (cnt < 0)
return;
/* splice out the 12-byte header plus padded data when it fits */
if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
((ssize_t) length-(cnt+12)-(q-datum)) > 0)
{
(void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
SetStringInfoLength(bim_profile,length-(cnt+12));
break;
}
/* skip this entry's data, padded to an even byte count */
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
/*
  GetAdditionalInformation() filters the "psd:additional-info" profile
  (extra layer information blocks preserved from a read PSD) according to
  the "psd:additional-info" image option: "all" keeps the profile intact,
  anything other than "selective" drops it entirely, and "selective" keeps
  only the blocks whose 4-byte keys appear in the allow-list below,
  compacting the profile in place.  Returns the surviving profile or NULL.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36
char
key[PSDKeySize];
/* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
const char
allowed[PSDAllowedLength][PSDKeySize] = {
"blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
"GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
"lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
"post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
},
*option;
const StringInfo
*info;
MagickBooleanType
found;
register size_t
i;
size_t
remaining_length,
length;
StringInfo
*profile;
unsigned char
*p;
unsigned int
size;
info=GetImageProfile(image,"psd:additional-info");
if (info == (const StringInfo *) NULL)
return((const StringInfo *) NULL);
option=GetImageOption(image_info,"psd:additional-info");
if (LocaleCompare(option,"all") == 0)
return(info);
if (LocaleCompare(option,"selective") != 0)
{
/* drop the profile altogether */
profile=RemoveImageProfile(image,"psd:additional-info");
return(DestroyStringInfo(profile));
}
length=GetStringInfoLength(info);
p=GetStringInfoDatum(info);
remaining_length=length;
length=0;
/* each block: 4-byte signature, 4-byte key, 4-byte big-endian size, data */
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(*p++);
key[1]=(*p++);
key[2]=(*p++);
key[3]=(*p++);
key[4]='\0';
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
size=size & 0xffffffff;
remaining_length-=12;
/* a block claiming more data than remains means corrupt input */
if ((size_t) size > remaining_length)
return((const StringInfo *) NULL);
found=MagickFalse;
for (i=0; i < PSDAllowedLength; i++)
{
if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
continue;
found=MagickTrue;
break;
}
remaining_length-=(size_t) size;
if (found == MagickFalse)
{
/* disallowed block: shift the tail left over it and re-scan here */
if (remaining_length > 0)
p=(unsigned char *) memmove(p-12,p+size,remaining_length);
continue;
}
/* keep the block: count its header and data, advance past the data */
length+=(size_t) size+12;
p+=size;
}
/* NOTE(review): 'info' and 'profile' reference the same StringInfo after
RemoveImageProfile(); SetImageProfile() clones it back -- confirm the
ownership of the returned pointer with callers */
profile=RemoveImageProfile(image,"psd:additional-info");
if (length == 0)
return(DestroyStringInfo(profile));
SetStringInfoLength(profile,(const size_t) length);
SetImageProfile(image,"psd:additional-info",info,exception);
return(profile);
}
/*
  WritePSDLayersInternal() writes the PSD "layer info" section: a layer
  count, one layer record per image in the list (bounds, channel table,
  blend mode, opacity, flags, optional mask record, name, and preserved
  additional-info blocks), followed by each layer's channel data.  The
  section length placeholder reserved at entry is back-patched at the end;
  the total (unpadded) size is also returned through 'layers_size' when
  that pointer is non-NULL.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
ExceptionInfo *exception)
{
char
layer_name[MagickPathExtent];
const char
*property;
const StringInfo
*info;
Image
*base_image,
*next_image;
MagickBooleanType
status;
MagickOffsetType
*layer_size_offsets,
size_offset;
register ssize_t
i;
size_t
layer_count,
layer_index,
length,
name_length,
rounded_size,
size;
status=MagickTrue;
/* layers start at the second image; a single image is its own layer */
base_image=GetNextImageInList(image);
if (base_image == (Image *) NULL)
base_image=image;
size=0;
/* reserve the layer-info section length; patched at the end */
size_offset=TellBlob(image);
SetPSDSize(psd_info,image,0);
layer_count=0;
for (next_image=base_image; next_image != NULL; )
{
layer_count++;
next_image=GetNextImageInList(next_image);
}
/* a negative layer count flags that the first alpha channel holds the
merged-image transparency (PSD specification) */
if (image->alpha_trait != UndefinedPixelTrait)
size+=WriteBlobShort(image,-(unsigned short) layer_count);
else
size+=WriteBlobShort(image,(unsigned short) layer_count);
layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
(size_t) layer_count,sizeof(MagickOffsetType));
if (layer_size_offsets == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
layer_index=0;
for (next_image=base_image; next_image != NULL; )
{
Image
*mask;
unsigned char
default_color;
unsigned short
channels,
total_channels;
mask=(Image *) NULL;
property=GetImageArtifact(next_image,"psd:opacity-mask");
default_color=0;
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
default_color=strlen(property) == 9 ? 255 : 0;
}
/* layer bounds: top, left, bottom, right */
size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
next_image->rows));
size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
next_image->columns));
channels=1U;
if ((next_image->storage_class != PseudoClass) &&
(IsImageGray(next_image) == MagickFalse))
channels=next_image->colorspace == CMYKColorspace ? 4U : 3U;
total_channels=channels;
if (next_image->alpha_trait != UndefinedPixelTrait)
total_channels++;
if (mask != (Image *) NULL)
total_channels++;
size+=WriteBlobShort(image,total_channels);
/* remember where this layer's channel length entries live so the
channel-data pass below can patch them */
layer_size_offsets[layer_index++]=TellBlob(image);
for (i=0; i < (ssize_t) channels; i++)
size+=WriteChannelSize(psd_info,image,(signed short) i);
/* channel id -1 is alpha, -2 is the user-supplied layer mask */
if (next_image->alpha_trait != UndefinedPixelTrait)
size+=WriteChannelSize(psd_info,image,-1);
if (mask != (Image *) NULL)
size+=WriteChannelSize(psd_info,image,-2);
/* blend-mode signature, byte-swapped when the blob is little-endian */
size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
/* NOTE(review): the blend mode is taken from the base 'image', not
'next_image' -- confirm this is intended */
size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(image));
property=GetImageArtifact(next_image,"psd:layer.opacity");
if (property != (const char *) NULL)
{
Quantum
opacity;
opacity=(Quantum) StringToInteger(property);
size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
(void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
}
else
size+=WriteBlobByte(image,255);
size+=WriteBlobByte(image,0);
size+=WriteBlobByte(image,next_image->compose==NoCompositeOp ?
1 << 0x02 : 1); /* layer properties - visible, etc. */
size+=WriteBlobByte(image,0);
info=GetAdditionalInformation(image_info,next_image,exception);
property=(const char *) GetImageProperty(next_image,"label",exception);
if (property == (const char *) NULL)
{
/* unlabeled layers get a synthetic name L1, L2, ... */
(void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
(double) layer_index);
property=layer_name;
}
/* extra-data length: padded name + mask record + additional info */
name_length=strlen(property)+1;
if ((name_length % 4) != 0)
name_length+=(4-(name_length % 4));
if (info != (const StringInfo *) NULL)
name_length+=GetStringInfoLength(info);
name_length+=8;
if (mask != (Image *) NULL)
name_length+=20;
size+=WriteBlobLong(image,(unsigned int) name_length);
if (mask == (Image *) NULL)
size+=WriteBlobLong(image,0);
else
{
/* 20-byte layer mask record: bounds, default color, flags, padding */
if (mask->compose != NoCompositeOp)
(void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
default_color),MagickTrue,exception);
mask->page.y+=image->page.y;
mask->page.x+=image->page.x;
size+=WriteBlobLong(image,20);
size+=WriteBlobSignedLong(image,mask->page.y);
size+=WriteBlobSignedLong(image,mask->page.x);
size+=WriteBlobSignedLong(image,(const signed int) mask->rows+
mask->page.y);
size+=WriteBlobSignedLong(image,(const signed int) mask->columns+
mask->page.x);
size+=WriteBlobByte(image,default_color);
size+=WriteBlobByte(image,mask->compose == NoCompositeOp ? 2 : 0);
size+=WriteBlobMSBShort(image,0);
}
size+=WriteBlobLong(image,0);
size+=WritePascalString(image,property,4);
if (info != (const StringInfo *) NULL)
size+=WriteBlob(image,GetStringInfoLength(info),
GetStringInfoDatum(info));
next_image=GetNextImageInList(next_image);
}
/*
Now the image data!
*/
next_image=base_image;
layer_index=0;
while (next_image != NULL)
{
length=WritePSDChannels(psd_info,image_info,image,next_image,
layer_size_offsets[layer_index++],MagickTrue,exception);
if (length == 0)
{
status=MagickFalse;
break;
}
size+=length;
next_image=GetNextImageInList(next_image);
}
/*
Write the total size
*/
if (layers_size != (size_t*) NULL)
*layers_size=size;
/* the stored section length is rounded up to an even byte count */
if ((size/2) != ((size+1)/2))
rounded_size=size+1;
else
rounded_size=size;
(void) WritePSDSize(psd_info,image,rounded_size,size_offset);
layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
layer_size_offsets);
/*
Remove the opacity mask from the registry
*/
next_image=base_image;
while (next_image != (Image *) NULL)
{
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
DeleteImageRegistry(property);
next_image=GetNextImageInList(next_image);
}
return(status);
}
/*
  WritePSDLayers() is the exported entry point for writing the PSD layer
  info section.  It honors the coder security policy -- when write rights
  for "PSD" are denied it silently succeeds without writing -- and
  otherwise delegates to WritePSDLayersInternal() without requesting the
  total section size.
*/
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL,
    exception);
}
/*
  WritePSDImage() writes 'image' in Adobe Photoshop format: the file
  header (signature, version, channel count, dimensions, depth, mode),
  the color-mode data (palette for indexed images), the image resource
  section (resolution, optional 8BIM and ICC profiles), the layer and
  mask information section, and finally the merged composite image data.
  Version 2 (PSB) is selected for the PSB magick or images larger than
  30000 pixels in either dimension.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const StringInfo
*icc_profile;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
size_t
length,
num_channels,
packet_size;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
packet_size=(size_t) (image->depth > 8 ? 6 : 3);
if (image->alpha_trait != UndefinedPixelTrait)
packet_size+=image->depth > 8 ? 2 : 1;
/* version 2 (PSB) for explicit PSB requests or oversized images */
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
/* When the image has a color profile it won't be converted to gray scale */
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,exception) != MagickFalse))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) && (image_info->type !=
TrueColorAlphaType) && (image->storage_class == PseudoClass))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->colorspace != CMYKColorspace)
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
else
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsImageGray(image) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
/* anything that is not (and was not requested as) CMYK goes out as
sRGB (indexed or RGB mode); otherwise convert to CMYK */
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace,exception);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
if ((IsImageGray(image) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
*/
/* fixed 768-byte palette: 256 red, then green, then blue bytes */
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(
image->colormap[i].green));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
/* work on a clone; the resolution (and ICC, when written separately)
resources are stripped so they are not emitted twice */
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
/* ICC profile resource (id 0x040F), padded to an even byte count */
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
if ((MagickOffsetType) GetStringInfoLength(icc_profile) !=
PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
if (status != MagickFalse)
{
MagickOffsetType
size_offset;
size_t
size;
/* layer and mask information section; the total length placeholder
reserved here is patched once the layers have been written */
size_offset=TellBlob(image);
SetPSDSize(&psd_info,image,0);
status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
exception);
size_offset+=WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 12),size_offset);
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
compression=image->compression;
/* the merged composite does not support Zip; fall back to RLE */
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (image_info->compression != UndefinedCompression)
image->compression=image_info->compression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
exception) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
|
streaming_rrr_generator.h | //===------------------------------------------------------------*- C++ -*-===//
//
// Ripples: A C++ Library for Influence Maximization
// Marco Minutoli <marco.minutoli@pnnl.gov>
// Pacific Northwest National Laboratory
//
//===----------------------------------------------------------------------===//
//
// Copyright 2018 Battelle Memorial Institute
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//===----------------------------------------------------------------------===//
#ifndef RIPPLES_STREAMING_RRR_GENERATOR_H
#define RIPPLES_STREAMING_RRR_GENERATOR_H
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdlib>
#include <memory>
#include <sstream>
#include <unordered_map>
#include <vector>
#include "omp.h"
#include "spdlog/sinks/stdout_color_sinks.h"
#include "spdlog/spdlog.h"
#include "trng/uniform_int_dist.hpp"
#include "ripples/imm_execution_record.h"
#ifdef RIPPLES_ENABLE_CUDA
#include "ripples/cuda/cuda_generate_rrr_sets.h"
#include "ripples/cuda/from_nvgraph/imm/bfs.hxx"
#endif
#if CUDA_PROFILE
#include <chrono>
#endif
namespace ripples {
// Parse the streaming-engine worker configuration and fill worker_to_gpu
// with a mapping from OpenMP worker rank to GPU device id.
//
// gpu_mapping_string is a comma-separated list of worker ranks; the i-th
// listed rank is assigned to GPU (i mod num_gpus). When the string is empty,
// the last streaming_gpu_workers ranks are mapped to devices round-robin.
// In builds without CUDA support, streaming_gpu_workers must be 0 and the
// map is left untouched.
//
// Returns 0 on success, -1 on an invalid configuration (errors are reported
// through the "console" spdlog logger).
//
// NOTE: `inline` is required here. This is a non-template free function
// defined in a header; without `inline`, every translation unit including
// this header emits its own external definition, which violates the ODR and
// fails to link ("multiple definition of streaming_command_line").
inline int streaming_command_line(
    std::unordered_map<size_t, size_t> &worker_to_gpu,
    size_t streaming_workers, size_t streaming_gpu_workers,
    std::string gpu_mapping_string) {
  auto console = spdlog::get("console");
  // GPU workers must be a subset of the streaming workers.
  if (!(streaming_workers > 0 && streaming_gpu_workers <= streaming_workers)) {
    console->error("invalid number of streaming workers");
    return -1;
  }
#ifdef RIPPLES_ENABLE_CUDA
  auto num_gpus = cuda_num_devices();
  if (!gpu_mapping_string.empty()) {
    // Explicit user mapping: assign each listed worker rank to GPUs
    // round-robin, stopping once streaming_gpu_workers entries are set.
    size_t gpu_id = 0;
    std::istringstream iss(gpu_mapping_string);
    std::string token;
    while (worker_to_gpu.size() < streaming_gpu_workers &&
           std::getline(iss, token, ',')) {
      std::stringstream omp_num_ss(token);
      size_t omp_num;
      omp_num_ss >> omp_num;
      if (!(omp_num < streaming_workers)) {
        console->error("invalid worker in worker-to-GPU mapping: {}", omp_num);
        return -1;
      }
      if (worker_to_gpu.find(omp_num) != worker_to_gpu.end()) {
        console->error("duplicated worker-to-GPU mapping: {}", omp_num);
        return -1;
      }
      worker_to_gpu[omp_num] = gpu_id++;
      if (gpu_id == num_gpus) gpu_id = 0;
    }
    if (worker_to_gpu.size() < streaming_gpu_workers) {
      console->error("GPU mapping string is too short");
      return -1;
    }
  } else {
    // By default, map GPU workers to the highest OpenMP ranks (after the
    // CPU workers), assigning devices round-robin.
    size_t gpu_id = 0;
    size_t omp_num = streaming_workers - streaming_gpu_workers;
    for (; omp_num < streaming_workers; ++omp_num) {
      worker_to_gpu[omp_num] = gpu_id++;
      if (gpu_id == num_gpus) gpu_id = 0;
    }
  }
#else   // RIPPLES_ENABLE_CUDA
  assert(streaming_gpu_workers == 0);
#endif  // RIPPLES_ENABLE_CUDA
  return 0;
}
// Abstract interface for a worker that generates random reverse-reachability
// (RRR) samples over graph G_ and writes them into an output range.
// Concrete CPU/GPU implementations claim batches of output slots from a
// shared atomic counter (simple work-stealing over [begin, end)).
template <typename GraphTy, typename ItrTy>
class WalkWorker {
using vertex_t = typename GraphTy::vertex_type;
public:
// G is held by reference and must outlive the worker.
WalkWorker(const GraphTy &G) : G_(G) {}
virtual ~WalkWorker() {}
// Service loop: repeatedly fetch-and-add a batch offset from mpmc_head and
// fill the corresponding sub-range of [begin, end) with RRR sets; returns
// when the claimed offset falls past the end of the range.
virtual void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy begin,
ItrTy end) = 0;
protected:
const GraphTy &G_;
#if CUDA_PROFILE
public:
// Open a new per-iteration profiling record for this worker.
virtual void begin_prof_iter() = 0;
// Append this worker's profile for iteration i into record r.
virtual void prof_record(typename IMMExecutionRecord::walk_iteration_prof &,
size_t) = 0;
#endif
};
// CPU implementation of WalkWorker: draws a uniformly random root vertex and
// runs AddRRRSet for it, batch_size_ samples at a time.
template <typename GraphTy, typename PRNGeneratorTy, typename ItrTy,
typename diff_model_tag>
class CPUWalkWorker : public WalkWorker<GraphTy, ItrTy> {
using vertex_t = typename GraphTy::vertex_type;
public:
// rng is copied; u_ presumably samples roots in [0, num_nodes) -- confirm
// against trng::uniform_int_dist's interval convention.
CPUWalkWorker(const GraphTy &G, const PRNGeneratorTy &rng)
: WalkWorker<GraphTy, ItrTy>(G), rng_(rng), u_(0, G.num_nodes()) {}
void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy begin, ItrTy end) {
size_t offset = 0;
while ((offset = mpmc_head.fetch_add(batch_size_)) <
std::distance(begin, end)) {
auto first = begin;
std::advance(first, offset);
auto last = first;
// NOTE(review): advancing past `end` and comparing iterators with `>`
// is only valid for random-access iterators -- ItrTy is assumed to be
// one (e.g. a vector iterator); confirm.
std::advance(last, batch_size_);
if (last > end) last = end;
batch(first, last);
}
}
private:
static constexpr size_t batch_size_ = 32;
PRNGeneratorTy rng_;
trng::uniform_int_dist u_;
// Fill [first, last) with RRR sets. RNG and distribution are copied to
// locals for the duration of the batch and written back afterwards.
void batch(ItrTy first, ItrTy last) {
#if CUDA_PROFILE
auto start = std::chrono::high_resolution_clock::now();
#endif
auto size = std::distance(first, last);
auto local_rng = rng_;
auto local_u = u_;
for (;first != last; ++first) {
vertex_t root = local_u(local_rng);
AddRRRSet(this->G_, root, local_rng, *first, diff_model_tag{});
}
rng_ = local_rng;
u_ = local_u;
#if CUDA_PROFILE
auto &p(prof_bd.back());
p.d_ += std::chrono::duration_cast<std::chrono::nanoseconds>(
std::chrono::high_resolution_clock::now() - start);
p.n_ += size;
#endif
}
#if CUDA_PROFILE
public:
// Per-iteration counters: n_ = sets generated, d_ = wall time spent.
struct iter_profile_t {
size_t n_{0};
std::chrono::nanoseconds d_{0};
};
using profile_t = std::vector<iter_profile_t>;
profile_t prof_bd;
void begin_prof_iter() { prof_bd.emplace_back(); }
// Log iteration i's set count, elapsed ns and throughput (sets/sec).
void print_prof_iter(size_t i) {
auto console = spdlog::get("console");
assert(i < prof_bd.size());
auto &p(prof_bd[i]);
if (p.n_)
console->info(
"n-sets={}\tns={}\tb={}", p.n_, p.d_.count(),
(float)p.n_ * 1e03 /
std::chrono::duration_cast<std::chrono::milliseconds>(p.d_)
.count());
else
console->info("> idle worker");
}
// Export iteration i's counters into the execution record.
void prof_record(typename IMMExecutionRecord::walk_iteration_prof &r,
size_t i) {
assert(i < prof_bd.size());
typename IMMExecutionRecord::cpu_walk_prof res;
auto &p(prof_bd[i]);
res.NumSets = p.n_;
res.Total = std::chrono::duration_cast<decltype(res.Total)>(p.d_);
r.CPUWalks.push_back(res);
}
#endif
};
template <typename GraphTy, typename PRNGeneratorTy, typename ItrTy,
typename diff_model_tag>
class GPUWalkWorker;
#ifdef RIPPLES_ENABLE_CUDA
// GPU worker for the Linear-Threshold diffusion model. Walks are generated
// in bulk by a CUDA kernel; each GPU thread writes its walk into a
// fixed-size mask of mask_words_ words, which is copied back to the host and
// decoded into an RRR set. Walks that overflow the mask are flagged by the
// kernel and redone on the host via the CPU path.
template <typename GraphTy, typename PRNGeneratorTy, typename ItrTy>
class GPUWalkWorker<GraphTy, PRNGeneratorTy, ItrTy, linear_threshold_tag>
: public WalkWorker<GraphTy, ItrTy> {
using vertex_t = typename GraphTy::vertex_type;
public:
// Launch geometry: fixed thread budget split into fixed-size blocks.
struct config_t {
config_t(size_t) {
auto console = spdlog::get("console");
assert(num_threads_ % block_size_ == 0);
max_blocks_ = num_threads_ / block_size_;
#if CUDA_PROFILE
console->info(
"> [GPUWalkWorkerLT::config_t] "
"block_size_={}\tnum_threads_={}\tmax_blocks_={}",
block_size_, num_threads_, max_blocks_);
#endif
}
size_t num_gpu_threads() const { return num_threads_; }
// configuration parameters
static constexpr size_t block_size_ = 256;
static constexpr size_t num_threads_ = 1 << 15;
const size_t mask_words_ = 8; // maximum walk size
// inferred configuration
size_t max_blocks_{0};
};
// Allocates one result mask per GPU thread (host + device) and the device
// RNG state array. NOTE(review): the malloc result is not checked before
// use elsewhere in the class -- relies on allocation success.
GPUWalkWorker(const config_t &conf, const GraphTy &G,
const PRNGeneratorTy &rng,
std::shared_ptr<cuda_ctx<GraphTy>> ctx)
: WalkWorker<GraphTy, ItrTy>(G),
conf_(conf),
rng_(rng),
u_(0, G.num_nodes()),
cuda_ctx_(ctx) {
cuda_set_device(ctx->gpu_id);
cuda_stream_create(&cuda_stream_);
// allocate host/device memory
auto mask_size = conf.mask_words_ * sizeof(mask_word_t);
lt_res_mask_ = (mask_word_t *)malloc(conf_.num_gpu_threads() * mask_size);
cuda_malloc((void **)&d_lt_res_mask_, conf_.num_gpu_threads() * mask_size);
// allocate device-size RNGs
cuda_malloc((void **)&d_trng_state_,
conf_.num_gpu_threads() * sizeof(PRNGeneratorTy));
}
~GPUWalkWorker() {
cuda_set_device(cuda_ctx_->gpu_id);
cuda_stream_destroy(cuda_stream_);
// free host/device memory
free(lt_res_mask_);
cuda_free(d_lt_res_mask_);
cuda_free(d_trng_state_);
}
// Seed the per-thread device RNG states as sub-sequences [first_seq,
// first_seq + num_seqs) of the master generator.
void rng_setup(const PRNGeneratorTy &master_rng, size_t num_seqs,
size_t first_seq) {
cuda_set_device(cuda_ctx_->gpu_id);
cuda_lt_rng_setup(d_trng_state_, master_rng, num_seqs, first_seq,
conf_.max_blocks_, conf_.block_size_);
}
// Same work-stealing loop as the CPU worker, but with one output slot per
// GPU thread per batch.
void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy begin, ItrTy end) {
cuda_set_device(cuda_ctx_->gpu_id);
size_t offset = 0;
auto batch_size = conf_.num_gpu_threads();
while ((offset = mpmc_head.fetch_add(batch_size)) <
std::distance(begin, end)) {
auto first = begin;
std::advance(first, offset);
auto last = first;
std::advance(last, batch_size);
if (last > end) last = end;
batch(first, last);
}
}
private:
config_t conf_;
PRNGeneratorTy rng_;
trng::uniform_int_dist u_;
cudaStream_t cuda_stream_;
std::shared_ptr<cuda_ctx<GraphTy>> cuda_ctx_;
// memory buffers
mask_word_t *lt_res_mask_, *d_lt_res_mask_;
PRNGeneratorTy *d_trng_state_;
// Run the LT kernel for `size` walks, copy the result masks to the host,
// then decode them into RRR sets.
void batch(ItrTy first, ItrTy last) {
#if CUDA_PROFILE
auto &p(prof_bd.back());
auto start = std::chrono::high_resolution_clock::now();
#endif
auto size = std::distance(first, last);
cuda_lt_kernel(conf_.max_blocks_, conf_.block_size_, size,
this->G_.num_nodes(), d_trng_state_, d_lt_res_mask_,
conf_.mask_words_, cuda_ctx_.get(), cuda_stream_);
#if CUDA_PROFILE
cuda_sync(cuda_stream_);
auto t1 = std::chrono::high_resolution_clock::now();
p.dwalk_ +=
std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - start);
auto t0 = t1;
#endif
cuda_d2h(lt_res_mask_, d_lt_res_mask_,
size * conf_.mask_words_ * sizeof(mask_word_t), cuda_stream_);
cuda_sync(cuda_stream_);
#if CUDA_PROFILE
t1 = std::chrono::high_resolution_clock::now();
p.dd2h_ += std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0);
t0 = t1;
#endif
batch_lt_build(first, size);
#if CUDA_PROFILE
t1 = std::chrono::high_resolution_clock::now();
p.dbuild_ += std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0);
#endif
#if CUDA_PROFILE
p.d_ += std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - start);
p.n_ += size;
#endif
}
// Decode one result mask per walk. Convention (inferred from use --
// confirm against the kernel): a mask whose word 0 equals num_nodes marks
// an overflowed walk whose root is stored in word 1; otherwise the mask
// holds up to mask_words_ vertices terminated by num_nodes.
void batch_lt_build(ItrTy first, size_t batch_size) {
#if CUDA_PROFILE
auto &p(prof_bd.back());
#endif
for (size_t i = 0; i < batch_size; ++i, ++first) {
auto &rrr_set(*first);
rrr_set.reserve(conf_.mask_words_);
auto res_mask = lt_res_mask_ + (i * conf_.mask_words_);
if (res_mask[0] != this->G_.num_nodes()) {
// valid walk
for (size_t j = 0;
j < conf_.mask_words_ && res_mask[j] != this->G_.num_nodes();
++j) {
rrr_set.push_back(res_mask[j]);
}
} else {
// invalid walk
#if CUDA_PROFILE
p.num_exceedings_++;
#endif
auto root = res_mask[1];
AddRRRSet(this->G_, root, rng_, rrr_set,
ripples::linear_threshold_tag{});
}
std::stable_sort(rrr_set.begin(), rrr_set.end());
}
}
#if CUDA_PROFILE
// Per-iteration counters: walk kernel, device-to-host copy, host decode.
struct iter_profile_t {
size_t n_{0}, num_exceedings_{0};
std::chrono::nanoseconds d_{0}, dwalk_{0}, dd2h_{0}, dbuild_{0};
};
using profile_t = std::vector<iter_profile_t>;
profile_t prof_bd;
public:
void begin_prof_iter() { prof_bd.emplace_back(); }
void print_prof_iter(size_t i) {
auto console = spdlog::get("console");
assert(i < prof_bd.size());
auto &p(prof_bd[i]);
if (p.n_) {
console->info(
"n-sets={}\tn-exc={}\tns={}\tb={}", p.n_, p.num_exceedings_,
p.d_.count(),
(float)p.n_ * 1e03 /
std::chrono::duration_cast<std::chrono::milliseconds>(p.d_)
.count());
console->info("walk={}\td2h={}\tbuild={}", p.dwalk_.count(),
p.dd2h_.count(), p.dbuild_.count());
console->info("n. exceedings={} (/{}={})", p.num_exceedings_, p.n_,
(float)p.num_exceedings_ / p.n_);
} else
console->info("> idle worker");
}
void prof_record(typename IMMExecutionRecord::walk_iteration_prof &r,
size_t i) {
assert(i < prof_bd.size());
typename IMMExecutionRecord::gpu_walk_prof res;
auto &p(prof_bd[i]);
res.NumSets = p.n_;
res.Total = std::chrono::duration_cast<decltype(res.Total)>(p.d_);
res.Kernel = std::chrono::duration_cast<decltype(res.Kernel)>(p.dwalk_);
res.D2H = std::chrono::duration_cast<decltype(res.D2H)>(p.dd2h_);
res.Post = std::chrono::duration_cast<decltype(res.Post)>(p.dbuild_);
r.GPUWalks.push_back(res);
}
#endif
};
// GPU worker for the Independent-Cascade diffusion model. Each walk is a
// GPU BFS (nvgraph solver) from a random root; the per-vertex predecessor
// array is copied back and every reached vertex is added to the RRR set.
template <typename GraphTy, typename PRNGeneratorTy, typename ItrTy>
class GPUWalkWorker<GraphTy, PRNGeneratorTy, ItrTy, independent_cascade_tag>
: public WalkWorker<GraphTy, ItrTy> {
using vertex_t = typename GraphTy::vertex_type;
using bfs_solver_t = nvgraph::Bfs<int, PRNGeneratorTy>;
public:
// Launch geometry: block size fixed by the solver; the available blocks
// are split evenly among the GPU workers (0 workers -> 0 blocks).
struct config_t {
config_t(size_t num_workers)
: block_size_(bfs_solver_t::traverse_block_size()),
max_blocks_(num_workers ? cuda_max_blocks() / num_workers : 0) {
auto console = spdlog::get("console");
console->info(
"> [GPUWalkWorkerIC::config_t] "
"max_blocks_={}\tblock_size_={}",
max_blocks_, block_size_);
}
size_t num_gpu_threads() const { return max_blocks_ * block_size_; }
const size_t max_blocks_;
const size_t block_size_;
};
// Allocates the host/device predecessor arrays, the device RNG states and
// the BFS solver bound to this worker's stream.
GPUWalkWorker(const config_t &conf, const GraphTy &G,
const PRNGeneratorTy &rng,
std::shared_ptr<cuda_ctx<GraphTy>> ctx)
: WalkWorker<GraphTy, ItrTy>(G),
conf_(conf),
rng_(rng),
u_(0, G.num_nodes()),
cuda_ctx_(ctx) {
cuda_set_device(ctx->gpu_id);
cuda_stream_create(&cuda_stream_);
// allocate host/device memory
// NOTE(review): the (int *) cast assumes vertex_t is exactly int (the
// member is declared as cuda_device_graph<GraphTy>::vertex_t *) --
// confirm.
ic_predecessors_ = (int *)malloc(
G.num_nodes() * sizeof(typename cuda_device_graph<GraphTy>::vertex_t));
cuda_malloc(
(void **)&d_ic_predecessors_,
G.num_nodes() * sizeof(typename cuda_device_graph<GraphTy>::vertex_t));
// allocate device-size RNGs
cuda_malloc((void **)&d_trng_state_,
conf_.num_gpu_threads() * sizeof(PRNGeneratorTy));
// create the solver
solver_ = new bfs_solver_t(
this->G_.num_nodes(), this->G_.num_edges(),
cuda_graph_index(cuda_ctx_.get()), cuda_graph_edges(cuda_ctx_.get()),
cuda_graph_weights(cuda_ctx_.get()), true, TRAVERSAL_DEFAULT_ALPHA,
TRAVERSAL_DEFAULT_BETA, conf_.max_blocks_, cuda_stream_);
solver_->configure(nullptr, d_ic_predecessors_, nullptr);
}
~GPUWalkWorker() {
cuda_set_device(cuda_ctx_->gpu_id);
delete solver_;
cuda_stream_destroy(cuda_stream_);
// free host/device memory
free(ic_predecessors_);
cuda_free(d_ic_predecessors_);
cuda_free(d_trng_state_);
}
// Seed the device RNG states and hand them to the BFS solver.
void rng_setup(const PRNGeneratorTy &master_rng, size_t num_seqs,
size_t first_seq) {
cuda_set_device(cuda_ctx_->gpu_id);
cuda_ic_rng_setup(d_trng_state_, master_rng, num_seqs, first_seq,
conf_.max_blocks_, conf_.block_size_);
solver_->rng(d_trng_state_);
}
void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy begin, ItrTy end) {
// set device and stream
cuda_set_device(cuda_ctx_->gpu_id);
size_t offset = 0;
while ((offset = mpmc_head.fetch_add(batch_size_)) <
std::distance(begin, end)) {
auto first = begin;
std::advance(first, offset);
auto last = first;
std::advance(last, batch_size_);
if (last > end) last = end;
batch(first, last);
}
}
private:
static constexpr size_t batch_size_ = 32;
config_t conf_;
PRNGeneratorTy rng_;
trng::uniform_int_dist u_;
// CUDA context
cudaStream_t cuda_stream_;
std::shared_ptr<cuda_ctx<GraphTy>> cuda_ctx_;
// nvgraph machinery
bfs_solver_t *solver_;
// memory buffers
typename cuda_device_graph<GraphTy>::vertex_t *ic_predecessors_,
*d_ic_predecessors_;
PRNGeneratorTy *d_trng_state_;
// One BFS per output slot: traverse from a random root, copy the
// predecessor array to the host, then materialize the RRR set.
void batch(ItrTy first, ItrTy last) {
#if CUDA_PROFILE
auto &p(prof_bd.back());
auto start = std::chrono::high_resolution_clock::now();
#endif
auto size = std::distance(first, last);
// NOTE(review): `size` is a signed difference_type compared against an
// unsigned size_t index -- harmless here since size >= 0, but worth
// normalizing.
for (size_t wi = 0; wi < size; ++wi) {
#if CUDA_PROFILE
auto t0 = std::chrono::high_resolution_clock::now();
#endif
auto root = u_(rng_);
// NOTE(review): reinterpret_cast between arithmetic types is
// ill-formed unless this is an identity conversion; the intended
// conversion is static_cast<int>(root) -- confirm and change.
solver_->traverse(reinterpret_cast<int>(root));
#if CUDA_PROFILE
cuda_sync(cuda_stream_);
auto t1 = std::chrono::high_resolution_clock::now();
p.dwalk_ += std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0);
t0 = t1;
#endif
cuda_d2h(ic_predecessors_, d_ic_predecessors_,
this->G_.num_nodes() *
sizeof(typename cuda_device_graph<GraphTy>::vertex_t),
cuda_stream_);
cuda_sync(cuda_stream_);
#if CUDA_PROFILE
t1 = std::chrono::high_resolution_clock::now();
p.dd2h_ += std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0);
t0 = t1;
#endif
// mark the root as its own predecessor so ic_build includes it
ic_predecessors_[root] = root;
ic_build(first++);
#if CUDA_PROFILE
t1 = std::chrono::high_resolution_clock::now();
p.dbuild_ +=
std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0);
#endif
}
#if CUDA_PROFILE
p.d_ += std::chrono::duration_cast<std::chrono::nanoseconds>(
std::chrono::high_resolution_clock::now() - start);
p.n_ += size;
#endif
}
// Collect every vertex the BFS reached (predecessor != -1, presumably the
// solver's "unreached" sentinel -- confirm) into the destination RRR set.
void ic_build(ItrTy dst) {
auto &rrr_set(*dst);
for (vertex_t i = 0; i < this->G_.num_nodes(); ++i)
if (ic_predecessors_[i] != -1) rrr_set.push_back(i);
}
#if CUDA_PROFILE
struct iter_profile_t {
size_t n_{0};
std::chrono::nanoseconds d_{0}, dwalk_{0}, dd2h_{0}, dbuild_{0};
};
using profile_t = std::vector<iter_profile_t>;
profile_t prof_bd;
public:
void begin_prof_iter() { prof_bd.emplace_back(); }
void print_prof_iter(size_t i) {
auto console = spdlog::get("console");
assert(i < prof_bd.size());
auto &p(prof_bd[i]);
if (p.n_) {
console->info(
"n-sets={}\tns={}\tb={}", p.n_, p.d_.count(),
(float)p.n_ * 1e03 /
std::chrono::duration_cast<std::chrono::milliseconds>(p.d_)
.count());
console->info("walk={}\td2h={}\tbuild={}", p.dwalk_.count(),
p.dd2h_.count(), p.dbuild_.count());
} else
console->info("> idle worker");
}
void prof_record(typename IMMExecutionRecord::walk_iteration_prof &r,
size_t i) {
assert(i < prof_bd.size());
typename IMMExecutionRecord::gpu_walk_prof res;
auto &p(prof_bd[i]);
res.NumSets = p.n_;
res.Total = std::chrono::duration_cast<decltype(res.Total)>(p.d_);
res.Kernel = std::chrono::duration_cast<decltype(res.Kernel)>(p.dwalk_);
res.D2H = std::chrono::duration_cast<decltype(res.D2H)>(p.dd2h_);
res.Post = std::chrono::duration_cast<decltype(res.Post)>(p.dbuild_);
r.GPUWalks.push_back(res);
}
#endif
};
#endif // RIPPLES_ENABLE_CUDA
// Orchestrates a pool of CPU and GPU WalkWorkers that cooperatively fill a
// range of RRR sets. Work is distributed through a shared atomic counter
// (mpmc_head); each generate() call runs one OpenMP parallel region with
// one thread per worker.
template <typename GraphTy, typename PRNGeneratorTy, typename ItrTy,
typename diff_model_tag>
class StreamingRRRGenerator {
using vertex_t = typename GraphTy::vertex_type;
using worker_t = WalkWorker<GraphTy, ItrTy>;
using gpu_worker_t =
GPUWalkWorker<GraphTy, PRNGeneratorTy, ItrTy, diff_model_tag>;
using cpu_worker_t =
CPUWalkWorker<GraphTy, PRNGeneratorTy, ItrTy, diff_model_tag>;
public:
// Builds one worker per OpenMP rank. Ranks present in worker_to_gpu get a
// GPU worker on the mapped device; all others get CPU workers. The master
// RNG is split into independent sub-sequences: one per CPU worker, then
// one per GPU worker, then num_gpu_threads_per_worker per GPU worker for
// the device-side states.
StreamingRRRGenerator(const GraphTy &G, const PRNGeneratorTy &master_rng,
IMMExecutionRecord &record, size_t num_cpu_workers,
size_t num_gpu_workers,
const std::unordered_map<size_t, size_t> &worker_to_gpu)
: num_cpu_workers_(num_cpu_workers),
num_gpu_workers_(num_gpu_workers),
record_(record),
console(spdlog::get("Streaming Generator")) {
if (!console) {
console = spdlog::stdout_color_st("Streaming Generator");
}
#ifdef RIPPLES_ENABLE_CUDA
// init GPU contexts (one shared context per distinct device)
for (auto &m : worker_to_gpu) {
auto gpu_id = m.second;
if (cuda_contexts_.find(gpu_id) == cuda_contexts_.end()) {
cuda_contexts_[gpu_id] =
std::shared_ptr<cuda_ctx<GraphTy>>(cuda_make_ctx(G, gpu_id));
}
}
typename gpu_worker_t::config_t gpu_conf(num_gpu_workers_);
assert(gpu_conf.max_blocks_ * num_gpu_workers_ <= cuda_max_blocks());
auto num_gpu_threads_per_worker = gpu_conf.num_gpu_threads();
auto num_rng_sequences =
num_cpu_workers_ + num_gpu_workers_ * (num_gpu_threads_per_worker + 1);
auto gpu_seq_offset = num_cpu_workers_ + num_gpu_workers_;
#else
assert(num_gpu_workers_ == 0);
size_t num_rng_sequences = num_cpu_workers_;
#endif
// console->info("CPU Workers {}", num_cpu_workers);
// console->info("GPU Workers {}", num_gpu_workers);
// translate user-mapping string into vector
size_t gpu_worker_id = 0;
size_t cpu_worker_id = 0;
for (size_t omp_num = 0; omp_num < num_cpu_workers + num_gpu_workers;
++omp_num) {
#ifdef RIPPLES_ENABLE_CUDA
if (worker_to_gpu.find(omp_num) != worker_to_gpu.end()) {
// create and add a GPU worker
auto gpu_id = worker_to_gpu.at(omp_num);
assert(cuda_contexts_.find(gpu_id) != cuda_contexts_.end());
console->info("> mapping: omp={}\t->\tGPU-device={}", omp_num, gpu_id);
auto rng = master_rng;
rng.split(num_rng_sequences, num_cpu_workers_ + gpu_worker_id);
auto w = new gpu_worker_t(gpu_conf, G, rng, cuda_contexts_.at(gpu_id));
w->rng_setup(
master_rng, num_rng_sequences,
gpu_seq_offset + gpu_worker_id * num_gpu_threads_per_worker);
workers.push_back(w);
++gpu_worker_id;
} else
#endif
{
// create and add a CPU worker
// console->info("> mapping: omp={}\t->\tCPU", omp_num);
// console->info("cpu_worker_id = {}", cpu_worker_id);
auto rng = master_rng;
rng.split(num_rng_sequences, cpu_worker_id);
workers.push_back(new cpu_worker_t(G, rng));
++cpu_worker_id;
}
}
// console->info("Configured");
}
// Move constructor: transfers worker ownership and profiling state.
// NOTE(review): `#if RIPPLES_ENABLE_CUDA` differs from the `#ifdef` used
// everywhere else; if the macro is defined with no value, `#if` fails to
// preprocess -- confirm and align on #ifdef.
StreamingRRRGenerator(StreamingRRRGenerator &&O)
: num_cpu_workers_(O.num_cpu_workers_),
num_gpu_workers_(O.num_gpu_workers_),
max_batch_size_(O.max_batch_size_),
console(std::move(O.console)),
#if RIPPLES_ENABLE_CUDA
cuda_contexts_(std::move(O.cuda_contexts_)),
#endif
workers(std::move(O.workers)),
mpmc_head(O.mpmc_head.load()),
#if CUDA_PROFILE
prof_bd(std::move(O.prof_bd)),
#endif
record_(O.record_) {
}
// Dumps profiling (when enabled), then destroys the workers it owns.
// NOTE(review): under CUDA_PROFILE this loops over `cpu_workers` and
// `gpu_workers`, which are not data members of this class (only `workers`
// is declared below) -- this cannot compile as-is when CUDA_PROFILE is
// enabled; confirm against the full file. Also the inner `auto ms`
// shadows the outer `ms` declared at the top of the destructor.
~StreamingRRRGenerator() {
#if CUDA_PROFILE
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(prof_bd.d);
console->info("*** BEGIN Streaming Engine profiling");
for (size_t i = 0; i < prof_bd.prof_bd.size(); ++i) {
console->info("+++ BEGIN iter {}", i);
console->info("--- CPU workers");
for (auto &wp : cpu_workers) wp->print_prof_iter(i);
#ifdef RIPPLES_ENABLE_CUDA
console->info("--- GPU workers");
for (auto &wp : gpu_workers) wp->print_prof_iter(i);
#endif
console->info("--- overall");
auto &p(prof_bd.prof_bd[i]);
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(p.d_);
console->info("n. sets = {}", p.n_);
console->info("elapsed (ns) = {}", p.d_.count());
console->info("throughput (sets/sec) = {}",
(float)p.n_ * 1e03 / ms.count());
console->info("+++ END iter {}", i);
// execution record
for (auto &wp : workers) {
wp->prof_record(record_.WalkIterations[i], i);
}
}
console->info("--- overall");
console->info("n. sets = {}", prof_bd.n);
console->info("n. iters = {}", prof_bd.prof_bd.size());
console->info("elapsed (ms) = {}", ms.count());
console->info("throughput (sets/sec) = {}",
(float)prof_bd.n * 1e03 / ms.count());
console->info("*** END Streaming Engine profiling");
#endif
for (auto &w : workers) delete w;
#ifdef RIPPLES_ENABLE_CUDA
// for (auto &m : cuda_contexts_) cuda_destroy_ctx(m.second);
#endif
}
IMMExecutionRecord &execution_record() { return record_; }
// Fill [begin, end) with RRR sets: reset the shared head counter and let
// every worker pull batches until the range is exhausted.
void generate(ItrTy begin, ItrTy end) {
#if CUDA_PROFILE
auto start = std::chrono::high_resolution_clock::now();
for (auto &w : workers) w->begin_prof_iter();
record_.WalkIterations.emplace_back();
#endif
mpmc_head.store(0);
#pragma omp parallel num_threads(num_cpu_workers_ + num_gpu_workers_)
{
size_t rank = omp_get_thread_num();
workers[rank]->svc_loop(mpmc_head, begin, end);
}
#if CUDA_PROFILE
auto d = std::chrono::duration_cast<std::chrono::nanoseconds>(
std::chrono::high_resolution_clock::now() - start);
prof_bd.prof_bd.emplace_back(std::distance(begin, end), d);
prof_bd.n += std::distance(begin, end);
prof_bd.d += std::chrono::duration_cast<std::chrono::microseconds>(d);
auto &ri(record_.WalkIterations.back());
ri.NumSets = std::distance(begin, end);
ri.Total = std::chrono::duration_cast<decltype(ri.Total)>(d);
#endif
}
bool isGpuEnabled() const { return num_gpu_workers_ != 0; }
private:
size_t num_cpu_workers_, num_gpu_workers_;
// NOTE(review): max_batch_size_ is only ever copied by the move
// constructor; it is never initialized or read otherwise.
size_t max_batch_size_;
std::shared_ptr<spdlog::logger> console;
#ifdef RIPPLES_ENABLE_CUDA
std::unordered_map<size_t, std::shared_ptr<cuda_ctx<GraphTy>>> cuda_contexts_;
#endif
std::vector<worker_t *> workers;
std::atomic<size_t> mpmc_head{0};
#if CUDA_PROFILE
struct iter_profile_t {
iter_profile_t(size_t n, std::chrono::nanoseconds d) : n_(n), d_(d) {}
size_t n_{0};
std::chrono::nanoseconds d_{0};
};
struct profile_t {
size_t n{0};
std::chrono::microseconds d{0};
std::vector<iter_profile_t> prof_bd;
};
profile_t prof_bd;
#endif
IMMExecutionRecord &record_;
};
} // namespace ripples
#endif // RIPPLES_STREAMING_RRR_GENERATOR_H
|
oskar_imager_rotate_coords.c | /*
* Copyright (c) 2016-2021, The OSKAR Developers.
* See the LICENSE file at the top-level directory of this distribution.
*/
#include "imager/private_imager.h"
#include "imager/oskar_imager.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Apply the imager's 3x3 rotation matrix h->M to num_coords baseline
 * coordinates: [uu' vv' ww']^T = M * [uu vv ww]^T, element by element,
 * in parallel over OpenMP threads. Single or double precision is selected
 * from the precision of uu_in; the accumulation is always done in double.
 * Each element is fully read into s0..s2 before the outputs are written,
 * so out arrays aliasing the in arrays element-wise is safe.
 * The rotation time is accumulated on h->tmr_rotate.
 */
void oskar_imager_rotate_coords(oskar_Imager* h, size_t num_coords,
const oskar_Mem* uu_in, const oskar_Mem* vv_in, const oskar_Mem* ww_in,
oskar_Mem* uu_out, oskar_Mem* vv_out, oskar_Mem* ww_out)
{
#ifdef OSKAR_OS_WIN
/* Signed loop index -- presumably because MSVC's OpenMP 2.0 requires a
 * signed induction variable in `omp parallel for`; confirm. */
int i;
const int num = (const int) num_coords;
#else
size_t i = 0;
const size_t num = num_coords;
#endif
const double *M = h->M;
oskar_timer_resume(h->tmr_rotate);
if (oskar_mem_precision(uu_in) == OSKAR_SINGLE)
{
float *uu_o = 0, *vv_o = 0, *ww_o = 0;
const float *uu_i = 0, *vv_i = 0, *ww_i = 0;
uu_i = (const float*)oskar_mem_void_const(uu_in);
vv_i = (const float*)oskar_mem_void_const(vv_in);
ww_i = (const float*)oskar_mem_void_const(ww_in);
uu_o = (float*)oskar_mem_void(uu_out);
vv_o = (float*)oskar_mem_void(vv_out);
ww_o = (float*)oskar_mem_void(ww_out);
#pragma omp parallel for private(i)
for (i = 0; i < num; ++i)
{
/* t = M * s (row-major 3x3 multiply in double precision) */
double s0 = 0.0, s1 = 0.0, s2 = 0.0, t0 = 0.0, t1 = 0.0, t2 = 0.0;
s0 = uu_i[i]; s1 = vv_i[i]; s2 = ww_i[i];
t0 = M[0] * s0 + M[1] * s1 + M[2] * s2;
t1 = M[3] * s0 + M[4] * s1 + M[5] * s2;
t2 = M[6] * s0 + M[7] * s1 + M[8] * s2;
uu_o[i] = t0; vv_o[i] = t1; ww_o[i] = t2;
}
}
else
{
/* Double-precision path: identical algorithm without the float cast. */
double *uu_o = 0, *vv_o = 0, *ww_o = 0;
const double *uu_i = 0, *vv_i = 0, *ww_i = 0;
uu_i = (const double*)oskar_mem_void_const(uu_in);
vv_i = (const double*)oskar_mem_void_const(vv_in);
ww_i = (const double*)oskar_mem_void_const(ww_in);
uu_o = (double*)oskar_mem_void(uu_out);
vv_o = (double*)oskar_mem_void(vv_out);
ww_o = (double*)oskar_mem_void(ww_out);
#pragma omp parallel for private(i)
for (i = 0; i < num; ++i)
{
double s0 = 0.0, s1 = 0.0, s2 = 0.0, t0 = 0.0, t1 = 0.0, t2 = 0.0;
s0 = uu_i[i]; s1 = vv_i[i]; s2 = ww_i[i];
t0 = M[0] * s0 + M[1] * s1 + M[2] * s2;
t1 = M[3] * s0 + M[4] * s1 + M[5] * s2;
t2 = M[6] * s0 + M[7] * s1 + M[8] * s2;
uu_o[i] = t0; vv_o[i] = t1; ww_o[i] = t2;
}
}
oskar_timer_pause(h->tmr_rotate);
}
#ifdef __cplusplus
}
#endif
|
mem.c | #include "mem.h"
#include "vec.h"
/*
 * Allocate a VA-aligned buffer holding V = n2V(n) SIMD vector blocks
 * (V * VA bytes; macros from vec.h) and zero it in parallel, one vector
 * store per iteration -- presumably 2^VLlg doubles per block, matching the
 * (i << VLlg) indexing; confirm against vec.h.
 * Returns NULL when n == 0; aborts the process on allocation failure.
 */
double *Valloc(const size_t n)
{
const size_t V = n2V(n);
double *const d = (double*)(V ? aligned_alloc(VA, V * VA) : NULL);
if (d) {
// each thread zeroes-out its portion of *d
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(V,d)
#endif /* _OPENMP */
for (size_t i = (size_t)0u; i < V; ++i) {
register const VD z = VI(setzero)();
VI(store)((d + (i << VLlg)), z);
}
}
else {
/* NOTE(review): when n == 0 no allocation was attempted, yet
 * perror("aligned_alloc") still prints whatever stale errno holds --
 * consider reporting only when V != 0. */
perror("aligned_alloc");
if (n)
exit(EXIT_FAILURE);
}
return d;
}
/*
 * Release a buffer obtained from Valloc.
 * Always returns NULL so the caller can reset its pointer in a single
 * statement: p = Vfree(p). A NULL argument is safe (free(NULL) is a no-op).
 */
double *Vfree(double *const d)
{
  free(d);
  return NULL;
}
/*
 * Allocate a zero-initialized Dmem workspace for problems of size n:
 * the real 2x2 block matrices A, U, V plus the vectors S1, S2, s.
 * Returns NULL when n == 0 or when the Dmem struct itself cannot be
 * allocated; the member allocations need no individual checks because
 * Valloc aborts the process on failure for n > 0.
 */
Dmem *Dalloc(const size_t n)
{
Dmem *const d = (Dmem*)(n ? calloc(1u, sizeof(Dmem)) : NULL);
if (d) {
d->r.A11 = Valloc(n);
d->r.A21 = Valloc(n);
d->r.A12 = Valloc(n);
d->r.A22 = Valloc(n);
d->r.U11 = Valloc(n);
d->r.U21 = Valloc(n);
d->r.U12 = Valloc(n);
d->r.U22 = Valloc(n);
d->r.V11 = Valloc(n);
d->r.V21 = Valloc(n);
d->r.V12 = Valloc(n);
d->r.V22 = Valloc(n);
d->v.S1 = Valloc(n);
d->v.S2 = Valloc(n);
d->v.s = Valloc(n);
}
return d;
}
/*
 * Release a Dmem workspace created by Dalloc, freeing the member vectors in
 * reverse order of allocation, then the struct itself. Safe on NULL.
 * Always returns NULL so the caller can write d = Dfree(d).
 */
Dmem *Dfree(Dmem *const d)
{
if (d) {
d->v.s = Vfree(d->v.s);
d->v.S2 = Vfree(d->v.S2);
d->v.S1 = Vfree(d->v.S1);
d->r.V22 = Vfree(d->r.V22);
d->r.V12 = Vfree(d->r.V12);
d->r.V21 = Vfree(d->r.V21);
d->r.V11 = Vfree(d->r.V11);
d->r.U22 = Vfree(d->r.U22);
d->r.U12 = Vfree(d->r.U12);
d->r.U21 = Vfree(d->r.U21);
d->r.U11 = Vfree(d->r.U11);
d->r.A22 = Vfree(d->r.A22);
d->r.A12 = Vfree(d->r.A12);
d->r.A21 = Vfree(d->r.A21);
d->r.A11 = Vfree(d->r.A11);
free(d);
}
return (Dmem*)NULL;
}
/*
 * Allocate a zero-initialized Zmem (complex) workspace for problems of size
 * n: the 2x2 block matrices A, U, V in both real (z->r) and imaginary
 * (z->i) parts, plus the vectors S1, S2, s.
 * Returns NULL when n == 0 or when the Zmem struct itself cannot be
 * allocated; member allocation failures abort inside Valloc for n > 0.
 */
Zmem *Zalloc(const size_t n)
{
Zmem *const z = (Zmem*)(n ? calloc(1u, sizeof(Zmem)) : NULL);
if (z) {
z->r.A11 = Valloc(n);
z->r.A21 = Valloc(n);
z->r.A12 = Valloc(n);
z->r.A22 = Valloc(n);
z->r.U11 = Valloc(n);
z->r.U21 = Valloc(n);
z->r.U12 = Valloc(n);
z->r.U22 = Valloc(n);
z->r.V11 = Valloc(n);
z->r.V21 = Valloc(n);
z->r.V12 = Valloc(n);
z->r.V22 = Valloc(n);
z->i.A11 = Valloc(n);
z->i.A21 = Valloc(n);
z->i.A12 = Valloc(n);
z->i.A22 = Valloc(n);
z->i.U11 = Valloc(n);
z->i.U21 = Valloc(n);
z->i.U12 = Valloc(n);
z->i.U22 = Valloc(n);
z->i.V11 = Valloc(n);
z->i.V21 = Valloc(n);
z->i.V12 = Valloc(n);
z->i.V22 = Valloc(n);
z->v.S1 = Valloc(n);
z->v.S2 = Valloc(n);
z->v.s = Valloc(n);
}
return z;
}
/*
 * Release a Zmem workspace created by Zalloc, freeing the member vectors in
 * reverse order of allocation, then the struct itself. Safe on NULL.
 * Always returns NULL so the caller can write z = Zfree(z).
 */
Zmem *Zfree(Zmem *const z)
{
if (z) {
z->v.s = Vfree(z->v.s);
z->v.S2 = Vfree(z->v.S2);
z->v.S1 = Vfree(z->v.S1);
z->i.V22 = Vfree(z->i.V22);
z->i.V12 = Vfree(z->i.V12);
z->i.V21 = Vfree(z->i.V21);
z->i.V11 = Vfree(z->i.V11);
z->i.U22 = Vfree(z->i.U22);
z->i.U12 = Vfree(z->i.U12);
z->i.U21 = Vfree(z->i.U21);
z->i.U11 = Vfree(z->i.U11);
z->i.A22 = Vfree(z->i.A22);
z->i.A12 = Vfree(z->i.A12);
z->i.A21 = Vfree(z->i.A21);
z->i.A11 = Vfree(z->i.A11);
z->r.V22 = Vfree(z->r.V22);
z->r.V12 = Vfree(z->r.V12);
z->r.V21 = Vfree(z->r.V21);
z->r.V11 = Vfree(z->r.V11);
z->r.U22 = Vfree(z->r.U22);
z->r.U12 = Vfree(z->r.U12);
z->r.U21 = Vfree(z->r.U21);
z->r.U11 = Vfree(z->r.U11);
z->r.A22 = Vfree(z->r.A22);
z->r.A12 = Vfree(z->r.A12);
z->r.A21 = Vfree(z->r.A21);
z->r.A11 = Vfree(z->r.A11);
free(z);
}
return (Zmem*)NULL;
}
/*
 * Allocate a Tout output workspace for problems of size n: four arrays of
 * N = n2N(n) `wide` elements (K2, RE, OU, OV), each zeroed in parallel one
 * SIMD-sized chunk (VL * sizeof(wide) bytes) at a time.
 * Returns NULL when n == 0; aborts the process on any allocation failure.
 */
Tout *Talloc(const size_t n)
{
Tout *const t = (Tout*)(n ? calloc((size_t)1u, sizeof(Tout)) : NULL);
if (t) {
const size_t V = n2V(n);
const size_t N = n2N(n);
if (!(t->K2 = (wide*)malloc(N * sizeof(wide)))) {
perror("malloc(K2)");
exit(EXIT_FAILURE);
}
if (!(t->RE = (wide*)malloc(N * sizeof(wide)))) {
perror("malloc(RE)");
exit(EXIT_FAILURE);
}
if (!(t->OU = (wide*)malloc(N * sizeof(wide)))) {
perror("malloc(OU)");
exit(EXIT_FAILURE);
}
if (!(t->OV = (wide*)malloc(N * sizeof(wide)))) {
perror("malloc(OV)");
exit(EXIT_FAILURE);
}
const size_t w = (VL * sizeof(wide));
// each thread zeroes-out its portion of t->*
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(V,t,w)
#endif /* _OPENMP */
for (size_t i = (size_t)0u; i < V; ++i) {
const size_t j = (i << VLlg);
(void)memset((t->K2 + j), 0, w);
(void)memset((t->RE + j), 0, w);
(void)memset((t->OU + j), 0, w);
(void)memset((t->OV + j), 0, w);
}
}
else {
/* NOTE(review): when n == 0 no allocation was attempted, yet
 * perror("calloc") still prints whatever stale errno holds. */
perror("calloc");
if (n)
exit(EXIT_FAILURE);
}
return t;
}
/*
 * Release an output workspace created by Talloc, freeing the four owned
 * arrays in reverse order of allocation and then the struct itself.
 * Always returns NULL so the caller can reset its pointer in one statement:
 * t = Tfree(t). A NULL argument is a no-op.
 */
Tout *Tfree(Tout *const t)
{
  if (!t)
    return (Tout*)NULL;
  free(t->OV);
  free(t->OU);
  free(t->RE);
  free(t->K2);
  free(t);
  return (Tout*)NULL;
}
|
relu1_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: bzhang@openailab.com
*/
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
/*
 * ReLU1 (hard clip to [-1, 1]) reference kernel, fp32.
 * Processes the tensor channel by channel, parallelized over channels with
 * OpenMP. Input and output tensors have identical shapes (the op's reshape
 * copies input dims to output), so all extents are read from the input
 * tensor for consistency -- the original mixed input and output dims.
 * NaN values propagate unchanged (both comparisons are false for NaN).
 *
 * NOTE(review): only dims[1..3] (C, H, W) are traversed; dims[0] is
 * ignored, so for a batch dimension > 1 only the first sample would be
 * processed -- confirm this op only ever sees batch == 1 tensors.
 *
 * Returns 0 on success.
 */
int ref_relu1_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, int num_thread)
{
    int w = input_tensor->dims[3];
    int h = input_tensor->dims[2];
    int channels = input_tensor->dims[1];
    int c_step = h * w; /* elements per channel (replaces duplicate `size`) */
    float* input_data = input_tensor->data;
    float* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
    for (int q = 0; q < channels; q++)
    {
        float* src = input_data + c_step * q;
        float* dst = out_data + c_step * q;
        for (int i = 0; i < c_step; i++)
        {
            float v = src[i];
            if (v > 1.f)
                v = 1.f;
            else if (v < -1.f)
                v = -1.f;
            dst[i] = v;
        }
    }
    return 0;
}
/* No per-node setup is needed for the reference ReLU1 op; always succeeds. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Nothing was allocated in init_node, so there is nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/*
 * Execute the ReLU1 node: fetch the node's input/output tensors from the
 * graph and run the fp32 reference kernel.
 * Returns the kernel's status (0 on success) -- the original discarded the
 * kernel's return value and unconditionally reported success.
 */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor =
        get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct ir_tensor* output_tensor =
        get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    /* Propagate the kernel status instead of swallowing it. */
    return ref_relu1_fp32(input_tensor, output_tensor, exec_graph->num_thread);
}
/*
 * Shape inference for ReLU1: the output shape mirrors the input shape
 * exactly. Returns the status of set_ir_tensor_shape.
 */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* node = exec_node->ir_node;
    struct ir_graph* graph = node->graph;
    struct ir_tensor* in = get_ir_graph_tensor(graph, node->input_tensors[0]);
    struct ir_tensor* out = get_ir_graph_tensor(graph, node->output_tensors[0]);

    return set_ir_tensor_shape(out, in->dims, in->dim_num);
}
/* Implementation-selection score. OPS_SCORE_CANDO advertises plain "can run"
 * capability -- presumably tuned implementations register a higher score and
 * are preferred; ranking semantics are framework-defined. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
return OPS_SCORE_CANDO;
}
/* Node-ops vtable for the reference ReLU1 implementation. prerun/postrun are
 * NULL because the kernel allocates no intermediate buffers. */
static struct node_ops hcl_node_ops = {.prerun = NULL,
.run = run,
.reshape = reshape,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Module hooks: bind this implementation to the OP_RELU1 operator id. */
static int reg_relu1_hcl_ops(void* arg)
{
return register_builtin_node_ops(OP_RELU1, &hcl_node_ops);
}
static int unreg_relu1_hcl_ops(void* arg)
{
return unregister_builtin_node_ops(OP_RELU1, &hcl_node_ops);
}
/* Auto-registration at library load/unload time. */
AUTO_REGISTER_OPS(reg_relu1_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_relu1_hcl_ops);
|
variable_utils.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Ruben Zorrilla
// Vicente Mataix Ferrandiz
//
//
#if !defined(KRATOS_VARIABLE_UTILS )
#define KRATOS_VARIABLE_UTILS
/* System includes */
/* External includes */
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/checks.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class VariableUtils
* @ingroup KratosCore
* @brief This class implements a set of auxiliar, already parallelized, methods to
* perform some common tasks related with the variable values and fixity.
* @details The methods are exported to python in order to add this improvements to the python interface
* @author Riccardo Rossi
* @author Ruben Zorrilla
* @author Vicente Mataix Ferrandiz
*/
class KRATOS_API(KRATOS_CORE) VariableUtils
{
public:
///@name Type Definitions
///@{
/// We create the Pointer related to VariableUtils
KRATOS_CLASS_POINTER_DEFINITION(VariableUtils);
/// The nodes container
typedef ModelPart::NodesContainerType NodesContainerType;
/// The conditions container
typedef ModelPart::ConditionsContainerType ConditionsContainerType;
/// The elements container
typedef ModelPart::ElementsContainerType ElementsContainerType;
/// A definition of the double variable
typedef Variable< double > DoubleVarType;
/// A definition of the component variable
typedef VariableComponent< VectorComponentAdaptor<array_1d<double, 3> > > ComponentVarType;
/// A definition of the array variable
typedef Variable< array_1d<double, 3 > > ArrayVarType;
///@}
///@name Life Cycle
///@{
/** Constructor.
*/
/** Destructor.
*/
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Copies the nodal value of a variable from an origin model
* part nodes to the nodes in a destination model part. It is assumed that
* both origin and destination model parts have the same number of nodes.
* @param rVariable reference to the variable to get the value from
* @param rDestinationVariable reference to the variable to be set
* @param rOriginModelPart origin model part from where the values are retrieved
* @param rDestinationModelPart destination model part to where the values are copied to
* @param BuffStep buffer step
*/
/**
 * Copies the historical nodal value of rVariable from the origin model part
 * into rDestinationVariable of the destination model part. The copy is
 * positional (node i -> node i), so both parts must hold the same number
 * of nodes; otherwise an error is thrown.
 */
template< class TVarType >
void CopyModelPartNodalVar(
    const TVarType& rVariable,
    const TVarType& rDestinationVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const unsigned int BuffStep = 0)
{
    const int num_origin = rOriginModelPart.NumberOfNodes();
    const int num_destination = rDestinationModelPart.NumberOfNodes();

    KRATOS_ERROR_IF_NOT(num_origin == num_destination) << "Origin and destination model parts have different number of nodes."
        << "\n\t- Number of origin nodes: " << num_origin
        << "\n\t- Number of destination nodes: " << num_destination << std::endl;

    #pragma omp parallel for
    for (int i = 0; i < num_origin; ++i) {
        const auto &it_src = rOriginModelPart.NodesBegin() + i;
        auto it_dst = rDestinationModelPart.NodesBegin() + i;
        it_dst->GetSolutionStepValue(rDestinationVariable, BuffStep) =
            it_src->GetSolutionStepValue(rVariable, BuffStep);
    }
}
/**
* @brief Copies the nodal value of a variable from an origin model
* part nodes to the nodes in a destination model part. It is assumed that
* both origin and destination model parts have the same number of nodes.
* @param rVariable reference to the variable to get the value from and to save in
* @param rOriginModelPart origin model part from where the values are retrieved
* @param rDestinationModelPart destination model part to where the values are copied to
* @param BuffStep buffer step
*/
template< class TVarType >
void CopyModelPartNodalVar(
const TVarType& rVariable,
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const unsigned int BuffStep = 0)
{
// Same-variable convenience overload: delegates to the two-variable version.
this->CopyModelPartNodalVar(rVariable, rVariable, rOriginModelPart, rDestinationModelPart, BuffStep);
}
/**
 * @brief Copies the historical nodal value of rVariable from the origin model
 * part into the NON-historical database (GetValue) of rDestinationVariable on
 * the destination model part nodes. The copy is positional (node i -> node i),
 * so both parts must have the same number of nodes.
 * @param rVariable reference to the (historical) variable to read from
 * @param rDestinationVariable reference to the non-historical variable to write
 * @param rOriginModelPart origin model part from where the values are retrieved
 * @param rDestinationModelPart destination model part to where the values are copied to
 * @param BuffStep buffer step used when reading the origin value
 */
template< class TVarType >
void CopyModelPartNodalVarToNonHistoricalVar(
const TVarType &rVariable,
const TVarType &rDestinationVariable,
const ModelPart &rOriginModelPart,
ModelPart &rDestinationModelPart,
const unsigned int BuffStep = 0)
{
const int n_orig_nodes = rOriginModelPart.NumberOfNodes();
const int n_dest_nodes = rDestinationModelPart.NumberOfNodes();
KRATOS_ERROR_IF_NOT(n_orig_nodes == n_dest_nodes) <<
"Origin and destination model parts have different number of nodes." <<
"\n\t- Number of origin nodes: " << n_orig_nodes <<
"\n\t- Number of destination nodes: " << n_dest_nodes << std::endl;
#pragma omp parallel for
for(int i_node = 0; i_node < n_orig_nodes; ++i_node){
auto it_dest_node = rDestinationModelPart.NodesBegin() + i_node;
const auto &it_orig_node = rOriginModelPart.NodesBegin() + i_node;
const auto &r_value = it_orig_node->GetSolutionStepValue(rVariable, BuffStep);
// Destination is written through GetValue => non-historical database
it_dest_node->GetValue(rDestinationVariable) = r_value;
}
}
/**
 * @brief Same-variable overload: copies the historical value of rVariable on
 * the origin nodes into the non-historical database of the destination nodes.
 */
template< class TVarType >
void CopyModelPartNodalVarToNonHistoricalVar(
const TVarType &rVariable,
const ModelPart &rOriginModelPart,
ModelPart &rDestinationModelPart,
const unsigned int BuffStep = 0)
{
// Delegates to the two-variable version using rVariable on both sides.
this->CopyModelPartNodalVarToNonHistoricalVar(rVariable, rVariable, rOriginModelPart, rDestinationModelPart, BuffStep);
}
/**
* @brief Copies the elemental value of a variable from an origin model
* part elements to the elements in a destination model part. It is assumed that
* both origin and destination model parts have the same number of elements.
* @param rVariable reference to the variable to be set
* @param rOriginModelPart origin model part from where the values are retrieved
* @param rDestinationModelPart destination model part to where the values are copied to
* @param BuffStep buffer step
*/
/**
 * Copies the non-historical elemental value of rVariable between two model
 * parts with the same number of elements. The copy is positional
 * (element i -> element i); a size mismatch throws.
 */
template< class TVarType >
void CopyModelPartElementalVar(
    const TVarType& rVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart){
    const int num_origin = rOriginModelPart.NumberOfElements();
    const int num_destination = rDestinationModelPart.NumberOfElements();

    KRATOS_ERROR_IF_NOT(num_origin == num_destination) << "Origin and destination model parts have different number of elements."
        << "\n\t- Number of origin elements: " << num_origin
        << "\n\t- Number of destination elements: " << num_destination << std::endl;

    #pragma omp parallel for
    for (int i = 0; i < num_origin; ++i) {
        const auto &it_src = rOriginModelPart.ElementsBegin() + i;
        auto it_dst = rDestinationModelPart.ElementsBegin() + i;
        it_dst->SetValue(rVariable, it_src->GetValue(rVariable));
    }
}
/**
* @brief Sets the nodal value of a scalar variable
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rNodes reference to the objective node set
*/
// Deprecated: kept only for backward compatibility; new code should call
// SetVariable, which handles any data type.
template <class TVarType>
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetScalarVar(
const TVarType &rVariable,
const double Value,
NodesContainerType &rNodes)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
// Writes the historical (solution-step) database of each node
it_node->FastGetSolutionStepValue(rVariable) = Value;
}
KRATOS_CATCH("")
}
/**
* @brief Sets the nodal value of a scalar variable (considering flag)
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rNodes reference to the objective node set
* @param Flag The flag to be considered in the assignation
* @param Check What is checked from the flag
*/
// Deprecated: kept only for backward compatibility; new code should call
// the flag-aware SetVariable overload instead.
template< class TVarType >
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetScalarVarForFlag(
const TVarType& rVariable,
const double Value,
NodesContainerType& rNodes,
const Flags Flag,
const bool Check = true
)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
// Only nodes whose flag state matches Check are modified
if (it_node->Is(Flag) == Check) it_node->FastGetSolutionStepValue(rVariable) = Value;
}
KRATOS_CATCH("")
}
/**
* @brief Sets the nodal value of a vector variable
* @param rVariable reference to the vector variable to be set
* @param Value array containing the Value to be set
* @param rNodes reference to the objective node set
*/
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetVectorVar(
const ArrayVarType& rVariable,
const array_1d<double, 3 >& Value,
NodesContainerType& rNodes
);
/**
* @brief Sets the nodal value of a vector variable (considering flag)
* @param rVariable reference to the vector variable to be set
* @param Value array containing the Value to be set
* @param rNodes reference to the objective node set
* @param Flag The flag to be considered in the assignation
* @param Check What is checked from the flag
*/
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetVectorVarForFlag(
const ArrayVarType& rVariable,
const array_1d<double, 3 >& Value,
NodesContainerType& rNodes,
const Flags Flag,
const bool Check = true
);
/**
* @brief Sets the nodal value of a scalar variable
* @tparam TDataType Variable data type
* @tparam Variable<TDataType> Variable type
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rNodes reference to the objective node set
*/
template<class TDataType, class TVarType = Variable<TDataType> >
void SetVariable(
const TVarType& rVariable,
const TDataType& rValue,
NodesContainerType& rNodes
)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
it_node->FastGetSolutionStepValue(rVariable) = rValue;
}
KRATOS_CATCH("")
}
/**
* @brief Sets the nodal value of a scalar variable (considering flag)
* @tparam TDataType Variable data type
* @tparam Variable<TDataType> Variable type
* @param rVariable reference to the scalar variable to be set
* @param rValue Value to be set
* @param rNodes reference to the objective node set
* @param Flag The flag to be considered in the assignation
* @param Check What is checked from the flag
*/
template <class TDataType, class TVarType = Variable<TDataType>>
void SetVariable(
const TVarType &rVariable,
const TDataType &rValue,
NodesContainerType &rNodes,
const Flags Flag,
const bool CheckValue = true)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k < static_cast<int>(rNodes.size()); ++k)
{
auto it_node = rNodes.begin() + k;
if (it_node->Is(Flag) == CheckValue)
{
it_node->FastGetSolutionStepValue(rVariable) = rValue;
}
}
KRATOS_CATCH("")
}
/**
* @brief Sets the nodal value of any variable to zero
* @param rVariable reference to the scalar variable to be set
* @param rNodes reference to the objective node set
*/
template< class TType , class TContainerType>
void SetNonHistoricalVariableToZero(
const Variable< TType >& rVariable,
TContainerType& rContainer)
{
KRATOS_TRY
// Delegates using the variable's own zero value (correct for scalars,
// arrays, vectors and matrices alike).
this->SetNonHistoricalVariable(rVariable, rVariable.Zero(), rContainer);
KRATOS_CATCH("")
}
/**
* @brief Sets the nodal value of any variable to zero
* @param rVariable reference to the scalar variable to be set
* @param rNodes reference to the objective node set
*/
template< class TType >
void SetHistoricalVariableToZero(
const Variable< TType >& rVariable,
NodesContainerType& rNodes)
{
KRATOS_TRY
// Delegates to SetVariable with the variable's own zero value, writing the
// historical (solution-step) database of every node.
this->SetVariable(rVariable, rVariable.Zero(), rNodes);
KRATOS_CATCH("")
}
/**
* @brief Sets the nodal value of a scalar variable non historical
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rNodes reference to the objective node set
*/
// Deprecated: kept only for backward compatibility; new code should call
// SetNonHistoricalVariable, which handles any data type and container.
template< class TVarType >
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetNonHistoricalVariable")
void SetNonHistoricalScalarVar(
const TVarType& rVariable,
const double Value,
NodesContainerType& rNodes
)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
// SetValue writes the non-historical data value container
it_node->SetValue(rVariable, Value);
}
KRATOS_CATCH("")
}
/**
* @brief Sets the nodal value of a vector non historical variable
* @param rVariable reference to the vector variable to be set
* @param Value array containing the Value to be set
* @param rNodes reference to the objective node set
*/
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetNonHistoricalVariable")
void SetNonHistoricalVectorVar(
const ArrayVarType& rVariable,
const array_1d<double, 3 >& Value,
NodesContainerType& rNodes
);
/**
* @brief Sets the container value of any type of non historical variable
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rContainer Reference to the objective container
*/
template< class TType, class TContainerType, class TVarType = Variable< TType >>
void SetNonHistoricalVariable(
const TVarType& rVariable,
const TType& Value,
TContainerType& rContainer
)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = rContainer.begin() + k;
it_cont->SetValue(rVariable, Value);
}
KRATOS_CATCH("")
}
/**
* @brief Sets the container value of any type of non historical variable (considering flag)
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rContainer Reference to the objective container
* @param Flag The flag to be considered in the assignation
* @param Check What is checked from the flag
*/
template< class TType, class TContainerType, class TVarType = Variable< TType >>
void SetNonHistoricalVariable(
const TVarType& rVariable,
const TType& rValue,
TContainerType& rContainer,
const Flags Flag,
const bool Check = true
)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = rContainer.begin() + k;
if (it_cont->Is(Flag) == Check) {
it_cont->SetValue(rVariable, rValue);
}
}
KRATOS_CATCH("")
}
/**
* @brief Clears the container data value container
* @param rContainer Reference to the objective container
*/
template< class TContainerType>
void ClearNonHistoricalData(TContainerType& rContainer)
{
KRATOS_TRY
const auto it_cont_begin = rContainer.begin();
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = it_cont_begin + k;
it_cont->Data().Clear();
}
KRATOS_CATCH("")
}
/**
* @brief Distributes variable values in TContainerType container to nodes
*
* This method distributes variables values stored in TContainerType data value container in rModelPart
* to nodes. Constant weighting is used for each node based on rWeightVariable value. The result
* is stored in nodal non-historical data value container under the same rVariable. If IsInverseWeightProvided
* is true, then the weights provided by rWeightVariable is inverted to get nodal weight. Otherwise, the value
* given by rWeightVariable is used as weight.
*
*
* @tparam TDataType Data type
* @tparam TContainerType ContainerType of model part
* @tparam TWeightDataType Data type of weight variable (this should be either int or double)
* @param rModelPart Model part
* @param rVariable Variable to be distributed
* @param rWeightVariable Variable which holds weight to distribute entity values to nodes
* @param IsInverseWeightProvided Whether the weight is provided as inverse or not.
*/
template <class TDataType, class TContainerType, class TWeightDataType>
void WeightedAccumulateVariableOnNodes(
ModelPart& rModelPart,
const Variable<TDataType>& rVariable,
const Variable<TWeightDataType>& rWeightVariable,
const bool IsInverseWeightProvided = false);
/**
* @brief Sets a flag according to a given status over a given container
* @param rFlag flag to be set
* @param rFlagValue flag value to be set
* @param rContainer Reference to the objective container
*/
template< class TContainerType >
void SetFlag(
const Flags& rFlag,
const bool& rFlagValue,
TContainerType& rContainer
)
{
KRATOS_TRY
const auto it_cont_begin = rContainer.begin();
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = it_cont_begin + k;
it_cont->Set(rFlag, rFlagValue);
}
KRATOS_CATCH("")
}
/**
* @brief Resets a flag over a given container
* @param rFlag flag to be reset
* @param rContainer Reference to the objective container
*/
template< class TContainerType >
void ResetFlag(
const Flags& rFlag,
TContainerType& rContainer
)
{
KRATOS_TRY
const auto it_cont_begin = rContainer.begin();
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = it_cont_begin + k;
it_cont->Reset(rFlag);
}
KRATOS_CATCH("")
}
/**
* @brief Flips a flag over a given container
* @param rFlag flag to be set
* @param rContainer Reference to the objective container
*/
template< class TContainerType >
void FlipFlag(
const Flags& rFlag,
TContainerType& rContainer
)
{
KRATOS_TRY
const auto it_cont_begin = rContainer.begin();
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = it_cont_begin + k;
it_cont->Flip(rFlag);
}
KRATOS_CATCH("")
}
/**
* @brief Takes the value of a non-historical vector variable and sets it in other variable
* @param OriginVariable reference to the origin vector variable
* @param SavedVariable reference to the destination vector variable
* @param rNodes reference to the objective node set
*/
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveVariable")
void SaveVectorVar(
const ArrayVarType& OriginVariable,
const ArrayVarType& SavedVariable,
NodesContainerType& rNodes
);
/**
* @brief Takes the value of a non-historical scalar variable and sets it in other variable
* @param OriginVariable reference to the origin scalar variable
* @param SavedVariable reference to the destination scalar variable
* @param rNodes reference to the objective node set
*/
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveVariable")
void SaveScalarVar(
const DoubleVarType& OriginVariable,
const DoubleVarType& SavedVariable,
NodesContainerType& rNodes
);
/**
* @brief Takes the value of a non-historical variable and saves it in another variable
* For a nodal container, this takes the value of a non-historical variable and saves it in another one
* @tparam TDataType The variable data type
* @tparam Variable<TDataType> The variable type
* @param rOriginVariable Reference to the origin variable
* @param rSavedVariable Reference to the destination variable
* @param rNodesContainer Reference to the nodal container
*/
template< class TDataType, class TVariableType = Variable<TDataType> >
void SaveVariable(
const TVariableType &rOriginVariable,
const TVariableType &rSavedVariable,
NodesContainerType &rNodesContainer)
{
KRATOS_TRY
#pragma omp parallel for
for (int i_node = 0; i_node < static_cast<int>(rNodesContainer.size()); ++i_node) {
auto it_node = rNodesContainer.begin() + i_node;
it_node->SetValue(rSavedVariable, it_node->FastGetSolutionStepValue(rOriginVariable));
}
KRATOS_CATCH("")
}
/**
* @brief Takes the value of a non-historical vector variable and sets it in other non-historical variable
* @param OriginVariable reference to the origin vector variable
* @param SavedVariable reference to the destination vector variable
* @param rNodes reference to the objective node set
*/
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveNonHistoricalVariable")
void SaveVectorNonHistoricalVar(
const ArrayVarType& OriginVariable,
const ArrayVarType& SavedVariable,
NodesContainerType& rNodes
);
/**
* @brief Takes the value of a non-historical scalar variable and sets it in other non-historical variable
* @param OriginVariable reference to the origin scalar variable
* @param SavedVariable reference to the destination scalar variable
* @param rNodes reference to the objective node set
*/
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveNonHistoricalVariable")
void SaveScalarNonHistoricalVar(
const DoubleVarType& OriginVariable,
const DoubleVarType& SavedVariable,
NodesContainerType& rNodes
);
/**
* @brief Takes the value of a non-historical variable and saves it in another historical variable
* For a non-nodal container, this method takes the value of an origin variable and saves it in a destination one
* @tparam TDataType The variable data type
* @tparam TContainerType The container type
* @tparam Variable<TDataType> The variable type
* @param rOriginVariable Reference to the origin variable
* @param rSavedVariable Reference to the destination variable
* @param rContainer Reference to the container of interest
*/
template< class TDataType, class TContainerType, class TVariableType = Variable<TDataType> >
void SaveNonHistoricalVariable(
const TVariableType &rOriginVariable,
const TVariableType &rSavedVariable,
TContainerType &rContainer
)
{
KRATOS_TRY
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(rContainer.size()); ++i) {
auto it_cont = rContainer.begin() + i;
it_cont->SetValue(rSavedVariable, it_cont->GetValue(rOriginVariable));
}
KRATOS_CATCH("")
}
/**
* @brief Takes the value of an historical vector variable and sets it in other variable
* @param OriginVariable reference to the origin vector variable
* @param DestinationVariable reference to the destination vector variable
* @param rNodes reference to the objective node set
*/
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use CopyVariable")
void CopyVectorVar(
const ArrayVarType& OriginVariable,
const ArrayVarType& DestinationVariable,
NodesContainerType& rNodes
);
/**
* @brief Takes the value of an historical component variable and sets it in other variable
* @param OriginVariable reference to the origin component variable
* @param DestinationVariable reference to the destination component variable
* @param rNodes reference to the objective node set
*/
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use CopyVariable")
void CopyComponentVar(
const ComponentVarType& OriginVariable,
const ComponentVarType& DestinationVariable,
NodesContainerType& rNodes
);
/**
* @brief Takes the value of an historical double variable and sets it in other variable
* @param OriginVariable reference to the origin double variable
* @param DestinationVariable reference to the destination double variable
* @param rNodes reference to the objective node set
*/
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use CopyVariable")
void CopyScalarVar(
const DoubleVarType &OriginVariable,
const DoubleVarType &DestinationVariable,
NodesContainerType &rNodes);
/**
* @brief Takes the value of an historical variable and sets it in another variable
* This function takes the value of an historical variable and sets in another
* variable in all the nodes of the provided container.
* @tparam TDataType The variable data type
* @tparam Variable<TDataType> The variable type
* @param rOriginVariable Reference to the origin variable
* @param rDestinationVariable Reference to the destination variable
* @param rNodesContainer Reference to the nodes container
*/
template< class TDataType, class TVariableType = Variable<TDataType> >
void CopyVariable(
const TVariableType &rOriginVariable,
const TVariableType &rDestinationVariable,
NodesContainerType &rNodesContainer)
{
KRATOS_TRY
#pragma omp parallel for
for (int i_node = 0; i_node < static_cast<int>(rNodesContainer.size()); ++i_node) {
auto it_node = rNodesContainer.begin() + i_node;
it_node->FastGetSolutionStepValue(rDestinationVariable) = it_node->FastGetSolutionStepValue(rOriginVariable);
}
KRATOS_CATCH("")
}
/**
* @brief Returns a list of nodes filtered using the given double variable and value
* @param Variable reference to the double variable to be filtered
* @param Value Filtering Value
* @param rOriginNodes Reference to the objective node set
* @return selected_nodes: List of filtered nodes
*/
NodesContainerType SelectNodeList(
const DoubleVarType& Variable,
const double Value,
const NodesContainerType& rOriginNodes
);
/**
* @brief Checks if all the nodes of a node set has the specified variable
* @param rVariable reference to a variable to be checked
* @param rNodes reference to the nodes set to be checked
* @return 0: if succeeds, return 0
*/
template<class TVarType>
int CheckVariableExists(
const TVarType& rVariable,
const NodesContainerType& rNodes
)
{
KRATOS_TRY
// The macro throws if the variable is missing from a node's historical
// database, so reaching the return statement means every node passed.
for (auto& i_node : rNodes)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(rVariable, i_node);
return 0;
KRATOS_CATCH("");
}
/**
* @brief Fixes or frees a variable for all of the nodes in the list. The dof has to exist.
* @param rVar reference to the variable to be fixed or freed
* @param IsFixed if true fixes, if false frees
* @param rNodes reference to the nodes set to be frixed or freed
*/
template< class TVarType >
void ApplyFixity(
const TVarType& rVar,
const bool IsFixed,
NodesContainerType& rNodes
)
{
KRATOS_TRY
// Empty container: nothing to do (avoids dereferencing begin()).
if (rNodes.size() != 0) {
// checking the first node to avoid error being thrown in parallel region
KRATOS_ERROR_IF_NOT(rNodes.begin()->HasDofFor(rVar)) << "Trying to fix/free dof of variable " << rVar.Name() << " but this dof does not exist in node #" << rNodes.begin()->Id() << "!" << std::endl;
// In debug builds every node is checked, not just the first one.
#ifdef KRATOS_DEBUG
for (const auto& r_node : rNodes) {
KRATOS_ERROR_IF_NOT(r_node.HasDofFor(rVar)) << "Trying to fix/free dof of variable " << rVar.Name() << " but this dof does not exist in node #" << r_node.Id() << "!" << std::endl;
}
#endif
// Also verify the variable exists in the nodal historical database.
CheckVariableExists(rVar, rNodes);
if (IsFixed) {
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
it_node->pGetDof(rVar)->FixDof();
}
} else {
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
it_node->pGetDof(rVar)->FreeDof();
}
}
}
KRATOS_CATCH("")
}
/**
* @brief Loops along a vector data to set its values to the nodes contained in a node set.
* @note This function is suitable for scalar historical variables, since each
* one of the values in the data vector is set to its correspondent node. Besides,
* the values must be sorted as the nodes are (value i corresponds to node i).
* @param rVar reference to the variable to be fixed or freed
* @param rData rData vector. Note that its length must equal the number of nodes
* @param rNodes reference to the nodes set to be set
*/
template< class TVarType >
void ApplyVector(
const TVarType& rVar,
const Vector& rData,
NodesContainerType& rNodes
)
{
KRATOS_TRY
// NOTE(review): an empty container errors even when rData is also empty;
// this preserves the long-standing behavior of this method.
if(rNodes.size() != 0 && rNodes.size() == rData.size()) {
// First we do a check
CheckVariableExists(rVar, rNodes);
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
// Positional assignment: entry k of rData goes to node k
it_node->FastGetSolutionStepValue(rVar) = rData[k];
}
} else
KRATOS_ERROR << "There is a mismatch between the size of data array and the number of nodes ";
KRATOS_CATCH("")
}
/**
* @brief Returns the nodal value summation of a non-historical vector variable.
* @param rVar reference to the vector variable to summed
* @param rModelPart reference to the model part that contains the objective node set
* @return sum_value: summation vector result
*/
array_1d<double, 3> SumNonHistoricalNodeVectorVariable(
const ArrayVarType& rVar,
const ModelPart& rModelPart
);
/**
* @brief Returns the nodal value summation of a non-historical scalar variable.
* @param rVar reference to the scalar variable to be summed
* @param rModelPart reference to the model part that contains the objective node set
* @return sum_value: summation result
*/
template< class TVarType >
double SumNonHistoricalNodeScalarVariable(
const TVarType& rVar,
const ModelPart& rModelPart
)
{
KRATOS_TRY
double sum_value = 0.0;
// Getting info
const auto& r_communicator = rModelPart.GetCommunicator();
const auto& r_local_mesh = r_communicator.LocalMesh();
const auto& r_nodes_array = r_local_mesh.Nodes();
const auto it_node_begin = r_nodes_array.begin();
#pragma omp parallel for reduction(+:sum_value)
for (int k = 0; k < static_cast<int>(r_nodes_array.size()); ++k) {
const auto it_node = it_node_begin + k;
sum_value += it_node->GetValue(rVar);
}
return r_communicator.GetDataCommunicator().SumAll(sum_value);
KRATOS_CATCH("")
}
/**
* @brief This method accumulates and return a variable value
* For a nodal historical variable, this method accumulates and
* returns the summation in a model part.
* @tparam TDataType Variable datatype
* @tparam Variable<TDataType> Variable type
* @param rVariable Nodal historical variable to be accumulated
* @param rModelPart Model part in where the summation is done
* @param BuffStep Buffer position
* @return TDataType Value of the summation
*/
/**
 * @brief Accumulates a nodal historical variable over the local mesh.
 * Per-thread partial sums are combined via AuxiliaryAtomicAdd and the final
 * value is reduced across MPI partitions.
 * @param rVariable Nodal historical variable to be accumulated
 * @param rModelPart Model part in where the summation is done
 * @param BuffStep Buffer position
 * @return TDataType Value of the summation
 */
template< class TDataType, class TVarType = Variable<TDataType> >
TDataType SumHistoricalVariable(
    const TVarType &rVariable,
    const ModelPart &rModelPart,
    const unsigned int BuffStep = 0
    )
{
    KRATOS_TRY

    TDataType sum_value;
    AuxiliaryInitializeValue(sum_value);

    const auto &r_communicator = rModelPart.GetCommunicator();
    const int n_nodes = r_communicator.LocalMesh().NumberOfNodes();
    // Hoisted out of the loop: LocalMesh().NodesBegin() is loop-invariant and
    // was previously re-evaluated on every iteration (see the other Sum*
    // methods, which already cache the begin iterator).
    const auto it_node_begin = r_communicator.LocalMesh().NodesBegin();

    #pragma omp parallel firstprivate(n_nodes)
    {
        // Thread-local accumulator, merged atomically at the end
        TDataType private_sum_value;
        AuxiliaryInitializeValue(private_sum_value);

        #pragma omp for
        for (int i_node = 0; i_node < n_nodes; ++i_node) {
            const auto it_node = it_node_begin + i_node;
            private_sum_value += it_node->GetSolutionStepValue(rVariable, BuffStep);
        }

        AuxiliaryAtomicAdd(private_sum_value, sum_value);
    }

    // Cross-partition reduction (no-op in serial runs)
    return r_communicator.GetDataCommunicator().SumAll(sum_value);

    KRATOS_CATCH("")
}
/**
* @brief Returns the condition value summation of a historical vector variable
* @param rVar reference to the vector variable to be summed
* @param rModelPart reference to the model part that contains the objective condition set
* @return sum_value: summation result
*/
array_1d<double, 3> SumConditionVectorVariable(
const ArrayVarType& rVar,
const ModelPart& rModelPart
);
/**
* @brief Returns the condition value summation of a historical scalar variable
* @param rVar reference to the scalar variable to be summed
* @param rModelPart reference to the model part that contains the objective condition set
* @return sum_value: summation result
*/
template< class TVarType >
double SumConditionScalarVariable(
const TVarType& rVar,
const ModelPart& rModelPart
)
{
KRATOS_TRY
double sum_value = 0.0;
// Getting info
const auto& r_communicator = rModelPart.GetCommunicator();
const auto& r_local_mesh = r_communicator.LocalMesh();
const auto& r_conditions_array = r_local_mesh.Conditions();
const auto it_cond_begin = r_conditions_array.begin();
#pragma omp parallel for reduction(+:sum_value)
for (int k = 0; k < static_cast<int>(r_conditions_array.size()); ++k) {
const auto it_cond = it_cond_begin + k;
sum_value += it_cond->GetValue(rVar);
}
return r_communicator.GetDataCommunicator().SumAll(sum_value);
KRATOS_CATCH("")
}
/**
* @brief Returns the element value summation of a historical vector variable
* @param rVar reference to the vector variable to be summed
* @param rModelPart reference to the model part that contains the objective element set
* @return sum_value: summation result
*/
array_1d<double, 3> SumElementVectorVariable(
const ArrayVarType& rVar,
const ModelPart& rModelPart
);
/**
* @brief Returns the element value summation of a historical scalar variable
* @param rVar reference to the scalar variable to be summed
* @param rModelPart reference to the model part that contains the objective element set
* @return sum_value: summation result
*/
template< class TVarType >
double SumElementScalarVariable(
const TVarType& rVar,
const ModelPart& rModelPart
)
{
KRATOS_TRY
double sum_value = 0.0;
// Getting info
const auto& r_communicator = rModelPart.GetCommunicator();
const auto& r_local_mesh = r_communicator.LocalMesh();
const auto& r_elements_array = r_local_mesh.Elements();
const auto it_elem_begin = r_elements_array.begin();
#pragma omp parallel for reduction(+:sum_value)
for (int k = 0; k < static_cast<int>(r_elements_array.size()); ++k) {
const auto it_elem = it_elem_begin + k;
sum_value += it_elem->GetValue(rVar);
}
return r_communicator.GetDataCommunicator().SumAll(sum_value);
KRATOS_CATCH("")
}
/**
* @brief This function add dofs to the nodes in a model part. It is useful since addition is done in parallel
* @param rVar The variable to be added as DoF
* @param rModelPart reference to the model part that contains the objective element set
*/
/**
 * @brief Adds the given variable as a DoF to every node of the model part.
 * The addition over the nodes is done in parallel (OpenMP).
 * @param rVar The variable to be added as DoF
 * @param rModelPart Reference to the model part whose nodes receive the DoF
 */
template< class TVarType >
void AddDof(
    const TVarType& rVar,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY

    // First we do a check: the variable must be registered and present in the
    // nodal solution step data (checked on the first node only; all nodes of
    // a model part share the same variables list)
    KRATOS_CHECK_VARIABLE_KEY(rVar)
    if (rModelPart.NumberOfNodes() != 0) {
        KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rVar)) << "ERROR:: Variable : " << rVar << " not included in the Solution step data ";
    }

    rModelPart.GetNodalSolutionStepVariablesList().AddDof(&rVar);

    // Hoist the begin iterator and the node count out of the parallel loop
    const auto it_node_begin = rModelPart.NodesBegin();
    const int n_nodes = static_cast<int>(rModelPart.NumberOfNodes());

    #pragma omp parallel for
    for (int k = 0; k < n_nodes; ++k) {
        auto it_node = it_node_begin + k;
        it_node->AddDof(rVar);
    }

    KRATOS_CATCH("")
}
/**
* @brief This function add dofs to the nodes in a model part. It is useful since addition is done in parallel
* @param rVar The variable to be added as DoF
* @param rReactionVar The corresponding reaction to the added DoF
* @param rModelPart reference to the model part that contains the objective element set
*/
/**
 * @brief Adds the given variable as a DoF (with its reaction) to every node
 * of the model part. The addition over the nodes is done in parallel (OpenMP).
 * @param rVar The variable to be added as DoF
 * @param rReactionVar The corresponding reaction to the added DoF
 * @param rModelPart Reference to the model part whose nodes receive the DoF
 */
template< class TVarType >
void AddDofWithReaction(
    const TVarType& rVar,
    const TVarType& rReactionVar,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY

    // Both the DoF variable and its reaction must be registered and present
    // in the nodal solution step data (checked on the first node only; all
    // nodes of a model part share the same variables list)
    KRATOS_CHECK_VARIABLE_KEY(rVar)
    KRATOS_CHECK_VARIABLE_KEY(rReactionVar)
    if (rModelPart.NumberOfNodes() != 0) {
        KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rVar)) << "ERROR:: DoF Variable : " << rVar << " not included in the Solution step data ";
        KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rReactionVar)) << "ERROR:: Reaction Variable : " << rReactionVar << " not included in the Solution step data ";
    }

    // If in debug we do a check for all nodes
    #ifdef KRATOS_DEBUG
    CheckVariableExists(rVar, rModelPart.Nodes());
    CheckVariableExists(rReactionVar, rModelPart.Nodes());
    #endif

    rModelPart.GetNodalSolutionStepVariablesList().AddDof(&rVar, &rReactionVar);

    // Hoist the begin iterator and the node count out of the parallel loop
    const auto it_node_begin = rModelPart.NodesBegin();
    const int n_nodes = static_cast<int>(rModelPart.NumberOfNodes());

    #pragma omp parallel for
    for (int k = 0; k < n_nodes; ++k) {
        auto it_node = it_node_begin + k;
        it_node->AddDof(rVar, rReactionVar);
    }

    KRATOS_CATCH("")
}
/**
* @brief This method checks the variable keys
* @return True if all the keys are correct
*/
bool CheckVariableKeys();
/**
* @brief This method checks the dofs
* @param rModelPart reference to the model part that contains the objective element set
* @return True if all the DoFs are correct
*/
bool CheckDofs(ModelPart& rModelPart);
/**
* @brief This method updates the current nodal coordinates back to the initial coordinates
* @param rNodes the nodes to be updated
*/
void UpdateCurrentToInitialConfiguration(const ModelPart::NodesContainerType& rNodes);
/**
* @param rNodes the nodes to be updated
* @brief This method updates the initial nodal coordinates to the current coordinates
*/
void UpdateInitialToCurrentConfiguration(const ModelPart::NodesContainerType& rNodes);
/**
* @brief This method updates the current coordinates
* For each node, this method takes the value of the provided variable and updates the
* current position as the initial position (X0, Y0, Z0) plus such variable value
* @param rNodes
* @param rUpdateVariable variable to retrieve the updating values from
*/
void UpdateCurrentPosition(
const ModelPart::NodesContainerType& rNodes,
const ArrayVarType& rUpdateVariable = DISPLACEMENT,
const IndexType BufferPosition = 0
);
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief Auxiliary double initialize method
* Auxiliary method to initialize a double value
* @param rValue Variable to initialize
*/
void AuxiliaryInitializeValue(double &rValue);
/**
* @brief Auxiliary array initialize method
* Auxiliary method to initialize an array value
* @param rValue Variable to initialize
*/
void AuxiliaryInitializeValue(array_1d<double,3> &rValue);
/**
* @brief Auxiliary scalar reduce method
* Auxiliary method to perform the reduction of a scalar value
* @param rPrivateValue Private variable to reduce
* @param rSumValue Variable to save the reduction
*/
void AuxiliaryAtomicAdd(
const double &rPrivateValue,
double &rSumValue
);
/**
* @brief Auxiliary array reduce method
* Auxiliary method to perform the reduction of an array value
* @param rPrivateValue Private variable to reduce
* @param rSumValue Variable to save the reduction
*/
void AuxiliaryAtomicAdd(
const array_1d<double,3> &rPrivateValue,
array_1d<double,3> &rSumValue
);
/**
* @brief This is auxiliar method to check the keys
* @return True if all the keys are OK
*/
/**
 * @brief Auxiliary method that checks all the registered components of type
 * TVarType: warns about empty/placeholder registration names and name
 * mismatches, and errors out on a zero variable key.
 * @return True if all the keys are OK (the loop errors out otherwise)
 */
template< class TVarType >
bool CheckVariableKeysHelper()
{
    KRATOS_TRY

    for (const auto& var : KratosComponents< TVarType >::GetComponents()) {
        // Detect components registered under an empty or placeholder name
        if (var.first == "NONE" || var.first == "")
            std::cout << " var first is NONE or empty " << var.first << var.second->Name() << std::endl;
        if (var.second->Name() == "NONE" || var.second->Name() == "")
            std::cout << var.first << var.second->Name() << std::endl;
        // Name of registration does not correspond to the variable name.
        // NOTE: the original printed "Variable Name = " with the name missing
        // (and elsewhere streamed the raw pointer var.second); fixed to print
        // the actual variable name.
        if (var.first != var.second->Name())
            std::cout << "Registration Name = " << var.first << " Variable Name = " << var.second->Name() << std::endl;
        KRATOS_ERROR_IF((var.second)->Key() == 0) << (var.second)->Name() << " Key is 0." << std::endl
            << "Check that Kratos variables have been correctly registered and all required applications have been imported." << std::endl;
    }
    return true;

    KRATOS_CATCH("")
}
template <class TContainerType>
TContainerType& GetContainer(ModelPart& rModelPart);
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class VariableUtils */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_VARIABLE_UTILS defined */
|
forces.c |
/*
* Compute forces and accumulate the virial and the potential
*/
/* Global accumulators for potential energy and virial; defined elsewhere. */
extern double epot, vir;

/*
 * forces: for npart particles whose coordinates are packed as (x,y,z)
 * triples in x[], accumulate the pairwise forces into f[] and add each
 * pair's contribution to epot and vir. The pair interaction looks like a
 * Lennard-Jones 12-6 form in reduced units (epot += rrd3*(rrd3-1.0)) --
 * TODO(review): confirm against the benchmark this kernel comes from.
 *
 * NOTE(review): the "omp single" / "omp for" directives below are orphaned
 * worksharing constructs: this function is meant to be called from inside
 * an enclosing "#pragma omp parallel" region. Called serially, they are
 * effectively no-ops.
 */
void
forces(int npart, double x[], double f[], double side, double rcoff){
int i;
/* One thread resets the global accumulators; the implicit barrier at the
 * end of the single construct keeps the other threads from entering the
 * reduction loop before the reset is done. */
#pragma omp single
{
vir = 0.0;
epot = 0.0;
}
/* Each thread accumulates private copies of epot and vir, combined at the
 * end of the worksharing loop by the reduction clause. */
#pragma omp for reduction(+:epot,vir) schedule(static,32)
for (i=0; i<npart*3; i+=3) {
// zero force components on particle i
double fxi = 0.0;
double fyi = 0.0;
double fzi = 0.0;
int j;
// loop over all particles with index > i
for (j=i+3; j<npart*3; j+=3) {
// compute distance between particles i and j allowing for wraparound
double xx = x[i]-x[j];
double yy = x[i+1]-x[j+1];
double zz = x[i+2]-x[j+2];
/* minimum-image convention: fold each separation into [-side/2, side/2] */
if (xx< (-0.5*side) ) xx += side;
if (xx> (0.5*side) ) xx -= side;
if (yy< (-0.5*side) ) yy += side;
if (yy> (0.5*side) ) yy -= side;
if (zz< (-0.5*side) ) zz += side;
if (zz> (0.5*side) ) zz -= side;
double rd = xx*xx+yy*yy+zz*zz;
// if distance is inside cutoff radius compute forces
// and contributions to pot. energy and virial
if (rd<=rcoff*rcoff) {
double rrd = 1.0/rd;
double rrd3 = rrd*rrd*rrd;
double rrd4 = rrd3*rrd;
double r148 = rrd4*(rrd3 - 0.5);
epot += rrd3*(rrd3-1.0);
vir -= rd*r148;
/* Newton's third law: i gains what j loses. Contributions to particle i
 * go into the thread-private fxi/fyi/fzi; the writes to f[j] must be
 * atomic because another thread may own iteration j concurrently. */
fxi += xx*r148;
fyi += yy*r148;
fzi += zz*r148;
#pragma omp atomic
f[j] -= xx*r148;
#pragma omp atomic
f[j+1] -= yy*r148;
#pragma omp atomic
f[j+2] -= zz*r148;
}
}
// update forces on particle i
/* f[i] is also written by other threads (when i plays the role of j in
 * their inner loops), so these updates must be atomic as well. */
#pragma omp atomic
f[i] += fxi;
#pragma omp atomic
f[i+1] += fyi;
#pragma omp atomic
f[i+2] += fzi;
}
}
|
GB_binop__ge_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__ge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__ge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_uint64)
// A*D function (colscale): GB (_AxD__ge_uint64)
// D*A function (rowscale): GB (_DxB__ge_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_uint64)
// C=scalar+B GB (_bind1st__ge_uint64)
// C=scalar+B' GB (_bind1st_tran__ge_uint64)
// C=A+scalar GB (_bind2nd__ge_uint64)
// C=A'+scalar GB (_bind2nd_tran__ge_uint64)
// C type: bool
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_UINT64 || GxB_NO_GE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, no accumulator. All the work is
// done by the included template, specialized through the GB_* macros defined
// above for the GE_UINT64 operator. (Generated file: edit Generator/* instead.)
GrB_Info GB (_Cdense_ewise3_noaccum__ge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GB_control.h; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix. The kernel body is
// compiled out (#if 0) for this operator -- presumably this dense-accum case
// is never reached for the GE comparator -- so this is a generated stub that
// reports success without doing anything.
GrB_Info GB (_Cdense_accumB__ge_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. As with the matrix-accum
// variant above, the kernel body is compiled out (#if 0) for this operator,
// leaving a generated stub that reports success.
GrB_Info GB (_Cdense_accumb__ge_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// GE_UINT64 operator entrywise via the colscale template.
GrB_Info GB (_AxD__ge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C holds bool values (the comparator's output type); the template fills Cx
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// GE_UINT64 operator entrywise via the rowscale template.
GrB_Info GB (_DxB__ge_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C holds bool values (the comparator's output type); the template fills Cx
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and B,
// with the GE_UINT64 operator applied where both entries are present.
GrB_Info GB (_AaddB__ge_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B over the intersection of the
// patterns of A and B, applying the GE_UINT64 operator to each matched pair.
GrB_Info GB (_AemultB_01__ge_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 for this operator (see the macro above),
// so only the unflipped #else branch below is compiled in.
GrB_Info GB (_AemultB_02__ge_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; iterates over the entries of the mask M.
GrB_Info GB (_AemultB_03__ge_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B, for the
// mask cases selected by ewise_method.
GrB_Info GB (_AemultB_bitmap__ge_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x >= Bx [p]) for every entry p of B, with the scalar x bound as
// the first operand. Bb is B->b when B is bitmap; GBB selects present entries.
GrB_Info GB (_bind1st__ge_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] >= y) for every entry p of A, with the scalar y bound as
// the second operand. Ab is A->b when A is bitmap; GBB selects present entries.
GrB_Info GB (_bind2nd__ge_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes cij = (x >= aij),
// with the scalar x bound as the first operand; no typecasting is performed
// (in spite of the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}

// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__ge_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (generated boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes cij = (aij >= y),
// with the scalar y bound as the second operand; no typecasting is performed
// (in spite of the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}

// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__ge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.