source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_binop__isne_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int8)
// A*D function (colscale): GB (_AxD__isne_int8)
// D*A function (rowscale): GB (_DxB__isne_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int8)
// C=scalar+B GB (_bind1st__isne_int8)
// C=scalar+B' GB (_bind1st_tran__isne_int8)
// C=A+scalar GB (_bind2nd__isne_int8)
// C=A'+scalar GB (_bind2nd_tran__isne_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
// Type and operator plumbing for the ISNE (int8) kernels: these macros
// specialize the shared template files included by the functions below.
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (fix: removed a stray trailing backslash after the 0, which spliced the
// following comment line into the macro definition)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// (fix: removed a stray trailing backslash after the 0, as above)
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator: z = (x != y), the ISNE operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT8 || GxB_NO_ISNE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled for ISNE: per the note below, only the listed accumulation
// operators get a dense ewise3-accum kernel, so none is generated here.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the actual work is done by the
// included template, specialized via the GB_* macros above.
// NOTE(review): unlike the other kernels in this file, this function is void
// and has no GB_DISABLE guard -- presumably the caller checks the disable
// condition first; confirm against the Generator template.
void GB (_Cdense_ewise3_noaccum__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C, with B
// pre-sliced into B_ntasks tasks for B_nthreads threads.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_Cdense_accumB__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (of type int8_t, passed via the generic
// pointer p_bwork) into the dense matrix C.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_Cdense_accumb__isne_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// fix: removed an unreachable duplicate "return (GrB_SUCCESS)" that
// followed the block above (the block always returns)
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with A
// pre-sliced into A_ntasks tasks for A_nthreads threads.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_AxD__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's values; the template writes the results here
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_DxB__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's values; the template writes the results here
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with the ISNE operator
// applied where A and B intersect.  When is_eWiseUnion is true, the alpha
// and beta scalars substitute for missing entries of A and B respectively.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_AaddB__isne_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
// typed copies of the union fill-in scalars; only read for eWiseUnion
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B where
// C is sparse or hypersparse; tasks are supplied in TaskList.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_AemultB_08__isne_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hypersparse and B is
// bitmap/full.  Since GB_BINOP_FLIP is 0 for ISNE (commutative), only the
// unflipped template branch below is compiled.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_AemultB_02__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full; M is pre-sliced into M_ntasks tasks.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_AemultB_04__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_AemultB_bitmap__isne_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for every entry present in B: apply the ISNE
// operator with the scalar x bound as the first operand.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_bind1st__isne_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the generic inputs and output
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int8_t *Cx = (int8_t *) Cx_output ;
int64_t pB ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pB = 0 ; pB < bnz ; pB++)
{
// only operate on entries present in the bitmap of B
if (GBB (Bb, pB))
{
int8_t bij = GBX (Bx, pB, false) ;
Cx [pB] = (x != bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for every entry present in A: apply the ISNE
// operator with the scalar y bound as the second operand.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_bind2nd__isne_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the generic inputs and output
int8_t y = (*((int8_t *) y_input)) ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t *Cx = (int8_t *) Cx_output ;
int64_t pA ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pA = 0 ; pA < anz ; pA++)
{
// only operate on entries present in the bitmap of A
if (GBB (Ab, pA))
{
int8_t aij = GBX (Ax, pA, false) ;
Cx [pA] = (aij != y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply the ISNE operator with the scalar x
// bound as the first operand; GB_CAST_OP above is used by the template.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_bind1st_tran__isne_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply the ISNE operator with the scalar y
// bound as the second operand; GB_CAST_OP above is used by the template.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_bind2nd_tran__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__minus_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fp64)
// A*D function (colscale): GB (_AxD__minus_fp64)
// D*A function (rowscale): GB (_DxB__minus_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fp64)
// C=scalar+B GB (_bind1st__minus_fp64)
// C=scalar+B' GB (_bind1st_tran__minus_fp64)
// C=A+scalar GB (_bind2nd__minus_fp64)
// C=A'+scalar GB (_bind2nd_tran__minus_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij - bij)
// Type and operator plumbing for the MINUS (fp64) kernels: these macros
// specialize the shared template files included by the functions below.
#define GB_ATYPE double
#define GB_BTYPE double
#define GB_CTYPE double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE 1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE 1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE 1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator: z = x - y
#define GB_BINOP(z,x,y,i,j) z = (x - y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP 0
// op is second
#define GB_OP_IS_SECOND 0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE (GxB_NO_MINUS || GxB_NO_FP64 || GxB_NO_MINUS_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; MINUS qualifies, so this
// kernel is generated (unlike the ISNE variant in the companion file).
void GB (_Cdense_ewise3_accum__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_Cdense_ewise3_noaccum__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C, with B
// pre-sliced into B_ntasks tasks for B_nthreads threads.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_Cdense_accumB__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (of type double, passed via the generic
// pointer p_bwork) into the dense matrix C.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_Cdense_accumb__minus_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// fix: removed an unreachable duplicate "return (GrB_SUCCESS)" that
// followed the block above (the block always returns)
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with A
// pre-sliced into A_ntasks tasks for A_nthreads threads.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_AxD__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's values; the template writes the results here
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_DxB__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's values; the template writes the results here
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with the MINUS operator
// applied where A and B intersect.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_AaddB__minus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B where
// C is sparse or hypersparse; tasks are supplied in TaskList.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_AemultB_08__minus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hypersparse and B is
// bitmap/full.  GB_BINOP_FLIP is 0 here (the flipped MINUS is handled as
// RMINUS elsewhere), so only the unflipped template branch is compiled.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_AemultB_02__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full; M is pre-sliced into M_ntasks tasks.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_AemultB_04__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_AemultB_bitmap__minus_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x - Bx [p]) for every entry present in B: apply the MINUS
// operator with the scalar x bound as the first operand.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_bind1st__minus_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the generic inputs and output
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
double *Cx = (double *) Cx_output ;
int64_t pB ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pB = 0 ; pB < bnz ; pB++)
{
// only operate on entries present in the bitmap of B
if (GBB (Bb, pB))
{
double bij = GBX (Bx, pB, false) ;
Cx [pB] = (x - bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] - y) for every entry present in A: apply the MINUS
// operator with the scalar y bound as the second operand.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_bind2nd__minus_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the generic inputs and output
double y = (*((double *) y_input)) ;
double *Ax = (double *) Ax_input ;
double *Cx = (double *) Cx_output ;
int64_t pA ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pA = 0 ; pA < anz ; pA++)
{
// only operate on entries present in the bitmap of A
if (GBB (Ab, pA))
{
double aij = GBX (Ax, pA, false) ;
Cx [pA] = (aij - y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x - aij) ; \
}
// C = op (x, A'): transpose A and apply the MINUS operator with the scalar x
// bound as the first operand; GB_CAST_OP above is used by the template.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_bind1st_tran__minus_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - y) ; \
}
// C = op (A', y): transpose A and apply the MINUS operator with the scalar y
// bound as the second operand; GB_CAST_OP above is used by the template.
// Returns GrB_NO_VALUE if this operator is disabled at compile time.
GrB_Info GB (_bind2nd_tran__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ordering_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file ordering_op-inl.h
* \brief Function definition of ordering operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#include <mxnet/operator_util.h>
#include <dmlc/optional.h>
#include <mshadow/tensor.h>
#include <algorithm>
#include <vector>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "./sort_op.h"
#include "./indexing_op.h"
namespace mshadow {
// Reinterpret a contiguous tensor's buffer under a new shape without copying.
// The returned tensor aliases src.dptr_; src must be contiguous (checked).
// NOTE(review): total element count of target_shape is not verified here --
// presumably callers guarantee it matches; confirm.
template<typename xpu, int src_dim, typename DType, int dst_dim>
inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src,
Shape<dst_dim> target_shape) {
CHECK_EQ(src.CheckContiguous(), true);
return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_);
}
};
namespace mxnet {
namespace op {
// These enums are only visible within this header
namespace topk_enum {
enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth};
} // topk_enum
// Parameters for the topk operator; see the describe() strings below for the
// user-facing semantics of each field.
struct TopKParam : public dmlc::Parameter<TopKParam> {
// axis to select along; empty optional means "flatten the input"
dmlc::optional<int> axis;
// number of elements to select; k < 1 requests a global sort
int k;
// one of topk_enum::TopKReturnType
int ret_typ;
// true: k smallest; false: k largest
bool is_ascend;
// dtype of the returned indices (when ret_typ is "indices" or "both")
int dtype;
DMLC_DECLARE_PARAMETER(TopKParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to choose the top k indices."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(k).set_default(1)
.describe("Number of top elements to select,"
" should be always smaller than or equal to the element number in the given axis."
" A global sort is performed if set k < 1.");
DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices)
.add_enum("value", topk_enum::kReturnValue)
.add_enum("indices", topk_enum::kReturnIndices)
.add_enum("mask", topk_enum::kReturnMask)
.add_enum("both", topk_enum::kReturnBoth)
.describe("The return type.\n"
" \"value\" means to return the top k values,"
" \"indices\" means to return the indices of the top k values,"
" \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values."
" \"both\" means to return a list of both values and indices of top k elements.");
DMLC_DECLARE_FIELD(is_ascend).set_default(false)
.describe("Whether to choose k largest or k smallest elements."
" Top K largest elements will be chosen if set to false.");
DMLC_DECLARE_FIELD(dtype)
.add_enum("uint8", mshadow::kUint8)
.add_enum("int32", mshadow::kInt32)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(mshadow::kFloat32)
.describe("DType of the output indices when ret_typ is \"indices\" or \"both\". "
"An error will be raised if the selected data type cannot precisely represent the "
"indices.");
}
};
// Parameters for the sort operator.
struct SortParam : public dmlc::Parameter<SortParam> {
// axis to sort along; empty optional means "flatten the input"
dmlc::optional<int> axis;
// true: ascending order; false: descending order
bool is_ascend;
DMLC_DECLARE_PARAMETER(SortParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to choose sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true)
.describe("Whether to sort in ascending or descending order.");
}
};
// Parameters for the argsort operator.
struct ArgSortParam : public dmlc::Parameter<ArgSortParam> {
// axis to sort along; empty optional means "flatten the input"
dmlc::optional<int> axis;
// true: ascending order; false: descending order
bool is_ascend;
// dtype of the returned indices
int dtype;
DMLC_DECLARE_PARAMETER(ArgSortParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true)
.describe("Whether to sort in ascending or descending order.");
DMLC_DECLARE_FIELD(dtype)
.add_enum("uint8", mshadow::kUint8)
.add_enum("int32", mshadow::kInt32)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(mshadow::kFloat32)
.describe("DType of the output indices. It is only valid when ret_typ is \"indices\" or"
" \"both\". An error will be raised if the selected data type cannot precisely "
"represent the indices.");
}
};
// Resolve a TopKParam against the source shape into the concrete values the
// top-k implementation needs:
//   batch_size  -- number of independent slices to process
//   element_num -- number of elements per slice (along the chosen axis)
//   axis        -- normalized (non-negative) axis, 0 when flattening
//   k           -- effective k (param.k, or element_num when param.k < 1)
//   do_transpose-- true when the chosen axis is not the innermost one
//   is_ascend   -- copied from param
//   target_shape-- shape of the output (k along axis, unless returning a mask)
// Fails via CHECK on an out-of-range axis or k.
inline void ParseTopKParam(const mxnet::TShape& src_shape, const TopKParam& param,
mxnet::TShape *target_shape, int *batch_size, int *element_num,
int *axis, int *k, bool *do_transpose, bool *is_ascend) {
*do_transpose = false;
*k = param.k;
*is_ascend = param.is_ascend;
// get batch_size, axis and element_num
if (!static_cast<bool>(param.axis)) {  // No axis given
*axis = 0;
*batch_size = 1;
*element_num = src_shape.Size();
} else {
*axis = param.axis.value();
if (*axis < 0) {
// normalize a negative axis
*axis += src_shape.ndim();
}
CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim()))
<< "Invalid axis! axis should be between 0 and "
<< src_shape.ndim() << ", found axis=" << *axis;
*batch_size = src_shape.Size() / src_shape[*axis];
*element_num = src_shape[*axis];
if (*axis != static_cast<int>(src_shape.ndim()) - 1) {
// a non-innermost axis requires transposing the data first
*do_transpose = true;
}
}
// get k: k < 1 means a global sort, i.e. keep every element
if (param.k <= 0) {
*k = *element_num;
}
// get target_shape
if (!static_cast<bool>(param.axis)) {
if (param.ret_typ != topk_enum::kReturnMask) {
*target_shape = mshadow::Shape1(*k);
} else {
*target_shape = src_shape;
}
} else {
*target_shape = src_shape;
if (param.ret_typ != topk_enum::kReturnMask) {
(*target_shape)[*axis] = *k;
}
}
// fix: the condition permits k == element_num, so the message now says
// "between 1 and" rather than the misleading "smaller than"
CHECK(*k >= 1 && *k <= *element_num) << "k must be between 1 and "
<< *element_num << ", get k = " << *k;
}
using namespace mshadow;
// Scatter kernel: write 1 at every position named by indices[i].
struct fill_ind_to_one {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, const int* indices, DType* out) {
    const int pos = indices[i];
    out[pos] = DType(1);
  }
};
// Scatter kernel honoring the write request: out[indices[i]] <op>= val[i],
// where <op> is selected by `req` via KERNEL_ASSIGN (write or add).
struct fill_ind {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, const int* indices, const DType* val,
                                  int req, DType* out) {
    const int pos = indices[i];
    KERNEL_ASSIGN(out[pos], req, val[i]);
  }
};
// CPU batched top-K: for each of the M batch segments, order the index array
// so its first K entries name the top-K values, then gather those values.
// `work` holds the flattened source values; `dat` receives the sorted ones.
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat,
                                   const Tensor<cpu, 1, int>& ind,
                                   const Tensor<cpu, 1, char>& work,
                                   int K, int N, bool is_ascend,
                                   Stream<cpu> *s) {
  // A full sort pays off once K is a sizable fraction of N.
  const bool full_sort = (K * 8 > N);
  // Number of independent batch items packed into `work`.
  const int M = work.size(0) / (sizeof(DType) * N);
  const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  #pragma omp parallel for num_threads(omp_threads)
  for (int b = 0; b < M; ++b) {
    DType *src = reinterpret_cast<DType*>(work.dptr_);
    DType *dst = dat.dptr_ + b * N;
    int *idx = ind.dptr_ + b * N;
    // Indices are global into `src`, so the comparators read `src` directly.
    auto less = [&](const int& p, const int& q) { return src[p] < src[q]; };
    auto greater = [&](const int& p, const int& q) { return src[p] > src[q]; };
    if (full_sort) {
      if (is_ascend) {
        std::sort(idx, idx + N, less);
      } else {
        std::sort(idx, idx + N, greater);
      }
    } else {
      if (is_ascend) {
        std::partial_sort(idx, idx + K, idx + N, less);
      } else {
        std::partial_sort(idx, idx + K, idx + N, greater);
      }
    }
    // Gather the K winning values into the output segment.
    for (int j = 0; j < K; ++j) {
      dst[j] = src[idx[j]];
    }
  }
}
#ifdef __CUDACC__
// Returns true when (val1, ind1) ranks ahead of (val2, ind2).
// A negative index marks an empty/undefined slot, which always loses:
// an empty second operand makes the first win, an empty first loses.
template<typename DType>
MSHADOW_XINLINE bool TopKCompare(DType val1, int ind1, DType val2, int ind2, bool is_ascend) {
  if (ind2 < 0) return true;
  if (ind1 < 0) return false;
  return is_ascend ? (val1 < val2) : (val1 > val2);
}
// Merge two sorted top-K candidate lists (val1/ind1 and val2/ind2) in place,
// leaving the best K entries, still sorted, in val1/ind1.  Slots with a
// negative index are empty placeholders and always lose (see TopKCompare).
template<typename DType>
MSHADOW_XINLINE void MergeTopK(int K, DType *val1, int *ind1, DType *val2, int *ind2,
                               bool is_ascend) {
  // In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals
  // [0,..,i1], [0,..i2] of the two lists that will be part of the merged list.
  int i1(K-1), i2(K-1);
  for (int i = 0; i < K; ++i) {
    if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
      --i2;
    } else {
      --i1;
    }
  }
  // Now merge the lists from back to front.
  // NOTE: `i2 < 0 || i1 >= 0 && ...` is intentional; && binds tighter than ||.
  // Take from list 1 when list 2 is exhausted, or when list 2's candidate wins
  // the comparison (the winner is kept for an earlier slot, so the loser —
  // list 1's candidate — is written at the current back position).
  for (int i = K; i--;) {
    if (i2 < 0 || i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend)) {
      val1[i] = val1[i1];
      ind1[i] = ind1[i1];
      --i1;
    } else {
      val1[i] = val2[i2];
      ind1[i] = ind2[i2];
      --i2;
    }
  }
}
// CUDA kernel for small K: one thread block per batch item.  Each thread
// builds its own sorted top-K list in shared memory by linear insertion,
// the per-thread lists are then pairwise merged (MergeTopK) until thread 0
// holds the block-wide top-K, which it writes to the front of the batch's
// val/ind segment.
template<typename DType>
__global__ void PartialSortSmallK(int K, int N, DType *val, int *ind, bool is_ascend) {
  // Buffer for pairwise reduction.
  extern __shared__ int buff[];
  // Start of buffer sections associated with this thread.
  const int offset(threadIdx.x*K);
  int *ind_buff = &buff[offset];
  // Values are laid out after all blockDim.x*K indices in the same
  // dynamically-sized shared allocation (see the launch's smem argument).
  DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset;
  // Initialize top-K values for this thread; a negative index marks an
  // empty slot (always loses in TopKCompare).
  for (int i = 0; i < K; ++i) {
    ind_buff[i] = -1;
  }
  // Range of values this thread cares about. Each thread block processes
  // a different batch item (i.e. a different set of ind/val where we
  // have to select the top-K elements). All threads within the same
  // block work on the same batch item.
  const int first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N);
  // Select top-K from this range and store it sorted in the buffer.
  // We assume a small K, so linear insertion is o.k.
  for (int i = first; i < last; i += blockDim.x) {
    DType cur_val(val[i]);
    int cur_ind(ind[i]);
    // Walk from the worst slot upward while the new element wins, shifting
    // losers down; stops as soon as the current element no longer wins.
    for (int j = K; j-- && TopKCompare(cur_val, cur_ind, val_buff[j], ind_buff[j], is_ascend); ) {
      if (j+1 < K) {
        val_buff[j+1] = val_buff[j];
        ind_buff[j+1] = ind_buff[j];
      }
      val_buff[j] = cur_val;
      ind_buff[j] = cur_ind;
    }
  }
  // Recursive merge of sorted lists for this thread block. Note that blockDim.x is not
  // necessary a power of two, therefore the additional checks for last_s.
  for (unsigned int s = (blockDim.x+1)/2, last_s = blockDim.x;
       last_s > 1; last_s = s, s = (s+1)/2) {
    __syncthreads();
    if (threadIdx.x < s && threadIdx.x+s < last_s) {
      MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend);
    }
  }
  // Final updates on master thread.
  if (threadIdx.x == 0) {
    for (int i = 0; i < K; ++i) {
      ind[blockIdx.x*N+i] = ind_buff[i];
      val[blockIdx.x*N+i] = val_buff[i];
    }
  }
}
// GPU batched top-K sort.  Large K: three back-to-back stable radix sorts on
// the whole flattened buffer.  Small K (<= 5): the shared-memory kernel above.
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<gpu, 1, DType>& dat,
                                   const Tensor<gpu, 1, int>& ind,
                                   const Tensor<gpu, 1, char>& work,
                                   int K, int N, bool is_ascend,
                                   Stream<gpu> *s) {
  // Use full sort for all but very small K for which we
  // can do a partial sort entirely within shared memory.
  const bool full_sort(K > 5);
  // Batch size.
  const int M(dat.size(0)/N);
  if (full_sort) {
    // Divide workspace into two parts. The first one is needed to store batch ids.
    size_t alignment = std::max(sizeof(DType), sizeof(int));
    size_t id_size = PadBytes(sizeof(int) * ind.size(0), alignment);
    Tensor<gpu, 1, int> batch_id(reinterpret_cast<int*>(work.dptr_), Shape1(ind.size(0)), s);
    Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s);
    // First sort values (carrying indices) across the whole flattened buffer...
    mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work);
    if (M > 1) {
      // ...then restore batch boundaries: stable-sorting by batch id makes each
      // batch contiguous again while preserving the per-batch value order.
      // Back to back sorting. Note that mxnet::op::SortByKey is a stable sort.
      batch_id = ind / N;
      mxnet::op::SortByKey(batch_id, dat, true, &sort_work);
      batch_id = ind / N;  // recompute: the previous sort permuted batch_id
      mxnet::op::SortByKey(batch_id, ind, true, &sort_work);
    }
  } else {
    // Small K: one block per batch item; shared memory holds blockDim.x
    // per-thread top-K lists of indices and values.
    const int nthreads(mshadow::cuda::kBaseThreadNum);
    PartialSortSmallK<<<M, nthreads, nthreads*K*(sizeof(int)+sizeof(DType)),
                        mshadow::Stream<gpu>::GetStream(s)>>>
                       (K, N, dat.dptr_, ind.dptr_, is_ascend);
  }
}
#endif
/*!
 * \brief Implementation of the TopK operation
 *
 * Selects the top-k entries along the requested axis and, depending on
 * param.ret_typ, writes values, indices, and/or a 0/1 mask into `ret`.
 *
 * \param ctx the running context
 * \param resource temporary resource handler (workspace provider)
 * \param req the write request type for each output blob
 * \param src the Source blob
 * \param ret the destination blobs
 * \param param the topk parameters (k is taken from param; k <= 0 selects all)
 * \tparam xpu the device type.
 * \tparam DType type of the output value/mask.
 * \tparam IDType type of the output indices.
 */
template<typename xpu, typename DType, typename IDType>
void TopKImpl(const RunContext &ctx,
              const Resource &resource,
              const std::vector<OpReqType>& req,
              const TBlob& src,
              const std::vector<TBlob>& ret,
              const TopKParam& param) {
  using namespace mshadow;
  using namespace mshadow::expr;
  // 1. Parse and initialize information
  Stream<xpu> *s = ctx.get_stream<xpu>();
  Tensor<xpu, 1, char> workspace;
  Tensor<xpu, 1, char> temp_workspace;
  Tensor<xpu, 1, DType> sorted_dat;
  Tensor<xpu, 1, int> indices, sel_indices;
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  size_t alignment = std::max(sizeof(DType), sizeof(int));
  mxnet::TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  // Indices are computed in int and cast to IDType on output; refuse dtypes
  // that cannot represent every position along the searched axis.
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>())
    << "'IDType' does not have a sufficient precision to represent the indices of the input array. "
    << "The total element_num is " << element_num << ", but the selected IDType can only represent "
    << mxnet::common::MaxIntegerValue<IDType>() << " elements";
  Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
  // Workspace layout (in order): sort temp space, sorted values, index buffer,
  // and (mask mode only) the selected-index buffer — each padded for alignment.
  size_t temp_size = 0;
  // Temp space needed by the gpu-based full sorts.
  temp_size = std::max(temp_size,
      mxnet::op::SortByKeyWorkspaceSize<int, int, xpu>(src.Size()));
  temp_size = std::max(temp_size,
      mxnet::op::SortByKeyWorkspaceSize<int, DType, xpu>(src.Size()));
  temp_size = std::max(temp_size,
      mxnet::op::SortByKeyWorkspaceSize<DType, int, xpu>(src.Size()));
  // Additional temp space for gpu full sorts for batch ids.
  temp_size += PadBytes(sizeof(int) * src.Size(), alignment);
  // Temp space for cpu sorts.
  temp_size = std::max(temp_size, static_cast<size_t>(sizeof(DType) * src.Size()));
  size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment)
                          + PadBytes(sizeof(int) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    workspace_size += PadBytes(sizeof(int) * batch_size * k, alignment);
  }
  workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
  char* workspace_curr_ptr = workspace.dptr_;
  sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                     Shape1(src.Size()), s);  // contain sorted dat
  workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
  indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                                Shape1(src.Size()), s);  // indices in the original matrix
  workspace_curr_ptr += PadBytes(sizeof(int) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    sel_indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                                      Shape1(batch_size * k), s);
    workspace_curr_ptr += PadBytes(sizeof(int) * batch_size * k, alignment);
    CHECK_EQ(sel_indices.CheckContiguous(), true);
  }
  if (std::is_same<xpu, cpu>::value) {
    // CPU: the flattened (possibly transposed) source goes into temp_workspace,
    // and TopKSort writes the sorted values into sorted_dat.
    Tensor<xpu, 1, DType> flattened_data;
    if (do_transpose) {
      flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                             Shape1(src.Size()), s);
      workspace_curr_ptr += sizeof(DType) * src.Size();
      flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
      CHECK_EQ(flattened_data.CheckContiguous(), true);
    } else {
      // No transpose needed: read the source in place.
      flattened_data = src.FlatTo1D<xpu, DType>(s);
    }
    // `temp_workspace` stores the flattened data
    temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
                                          Shape1(sizeof(DType)*src.Size()), s);
    CHECK_EQ(temp_workspace.CheckContiguous(), true);
  } else {
    // GPU: the source is copied into sorted_dat, which is sorted in place;
    // temp_workspace is scratch for SortByKey.
    if (do_transpose) {
      sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
    } else {
      sorted_dat = reshape(dat, Shape1(src.Size()));
    }
    CHECK_EQ(sorted_dat.CheckContiguous(), true);
    temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s);  // temp space
    workspace_curr_ptr += temp_size;
  }
  // Fill `indices` with 0, 1, 2, ... over the whole flattened buffer.
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, 0, 1,
                                           kWriteTo, indices.dptr_);
  CHECK_EQ(indices.CheckContiguous(), true);
  // 2. Perform inplace batch sort.
  // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
  // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
  // `temp_workspace` is used to store the flattend source data for CPU device, and it's used as
  // a temporal buffer for GPU device.
  TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);
  // 3. Assign results to the ret blob
  // When returning indices, only update(modulo) required elements instead of full elements
  // to avoid redundant calculation.
  // Cast `ret_indices` from int to real_t could introduce conversion error when the element_num
  // is large enough.
  if (param.ret_typ == topk_enum::kReturnMask) {
    Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
    ret_mask = scalar<DType>(0);
    // Collect the flat positions of each batch's top-k winners.
    sel_indices = reshape(slice<1>(
                              inplace_reshape(indices,
                                              Shape2(batch_size,
                                                     element_num)), 0, k),
                          Shape1(batch_size * k));
    if (do_transpose) {
      // Map the positions back from the transposed layout to the original one.
      mxnet::TShape src_shape = src.shape_.FlatTo3D(axis);
      CHECK_EQ(sel_indices.CheckContiguous(), true);
      sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                      Shape3(0, 2, 1));
    }
    if (req[0] == kNullOp) {
      return;
    } else if (req[0] == kWriteTo) {
      mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k,
                                                     sel_indices.dptr_, ret_mask.dptr_);
    } else {
      LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
    }
  } else if (param.ret_typ == topk_enum::kReturnIndices) {
    // `mod element_num` converts flat positions back to per-batch positions.
    if (do_transpose) {
      Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)),
                   0, k),
          Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, IDType> ret_indices =
          ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
          element_num)));
    }
  } else {
    // kReturnValue / kReturnBoth: emit values, and indices into ret[1].
    if (do_transpose) {
      Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
      Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_value, req[0], transpose(
          slice<2>(inplace_reshape(sorted_dat,
                                   Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)),
                   0, k), Shape3(0, 2, 1)));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)),
                   0, k), Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, DType> ret_value =
          ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
      Tensor<xpu, 2, IDType> ret_indices =
          ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_value, req[0],
          slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
    }
  }
}
// Forward entry point of the `topk` operator: dispatch on data dtype and,
// when indices are emitted, on the user-selected index dtype.
template<typename xpu>
void TopK(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  const bool emits_indices = (param.ret_typ == topk_enum::kReturnIndices ||
                              param.ret_typ == topk_enum::kReturnBoth);
  if (emits_indices) {
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
        TopKImpl<xpu, DType, IDType>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
      })
    });
  } else {
    // No index output: the internal index type is irrelevant, use int.
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      TopKImpl<xpu, DType, int>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
    });
  }
}
// Forward entry point of `sort`: implemented as a full top-k
// (k == axis length) that returns only the sorted values.
template<typename xpu>
void Sort(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.ret_typ = topk_enum::kReturnValue;
  topk_param.k = 0;  // k <= 0 selects the whole axis
  topk_param.is_ascend = param.is_ascend;
  topk_param.axis = param.axis;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    TopKImpl<xpu, DType, int>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, topk_param);
  });
}
// Forward entry point of `argsort`: a full top-k (k == axis length)
// returning only the indices, in the user-selected index dtype.
template<typename xpu>
void ArgSort(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<TBlob>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<TBlob>& outputs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.ret_typ = topk_enum::kReturnIndices;
  topk_param.dtype = param.dtype;
  topk_param.k = 0;  // k <= 0 selects the whole axis
  topk_param.is_ascend = param.is_ascend;
  topk_param.axis = param.axis;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
      TopKImpl<xpu, DType, IDType>(ctx.run_ctx,
                                   ctx.requested[0], req, inputs[0], outputs, topk_param);
    });
  });
}
// Backward of TopK for value-returning modes (kReturnValue / kReturnBoth):
// scatters the incoming output gradient (inputs[0]) back into the input
// gradient (outputs[0]) at the flat positions recorded in the forward
// indices (inputs[2]).  For kWriteTo, all other positions are zeroed;
// for kAddTo they are left untouched.
template<typename xpu, typename DType, typename IDType>
void TopKBackwardImpl(const OpContext &ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs,
                      const TopKParam& param) {
  CHECK_NE(req[0], kWriteInplace);
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>();
  CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  mxnet::TShape target_shape;
  // Parse against the *input-gradient* shape (the forward input's shape).
  ParseTopKParam(outputs[0].shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>())
    << "'IDType' does not have a sufficient precision to represent the indices of the input array. "
    << "The total element_num is " << element_num << ", but the selected IDType can only represent "
    << mxnet::common::MaxIntegerValue<IDType>() << " elements";
  // Workspace: batch_size*k selected flat positions + batch_size batch offsets.
  Tensor<xpu, 1, int> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, int>(Shape1(batch_size * k + batch_size), s);
  Tensor<xpu, 1, int> sel_indices =
      Tensor<xpu, 1, int>(workspace.dptr_, Shape1(batch_size * k), s);
  Tensor<xpu, 1, int> batch_shift =
      Tensor<xpu, 1, int>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s);
  // Both gradients are viewed as flat column vectors for the scatter kernel.
  Tensor<xpu, 2, DType> out_grad =
      inputs[0].get_with_shape<xpu, 2, DType>(Shape2(inputs[0].shape_.Size(), 1), s);
  Tensor<xpu, 2, DType> in_grad =
      outputs[0].get_with_shape<xpu, 2, DType>(Shape2(outputs[0].shape_.Size(), 1), s);
  // batch_shift[b] = b * element_num: flat offset of batch b's first element.
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, 0, element_num, kWriteTo,
                                           batch_shift.dptr_);
  if (do_transpose) {
    // Indices are per-axis; add the batch offset in the transposed layout,
    // then map the flat positions back to the original layout.
    Tensor<xpu, 1, IDType> indices = inputs[2].FlatTo1D<xpu, IDType>(s);
    mxnet::TShape src_shape = outputs[0].shape_.FlatTo3D(axis);
    sel_indices = reshape(transpose(
                              broadcast_to(inplace_reshape(batch_shift,
                                                           Shape3(src_shape[0], src_shape[2], 1)),
                                           mxnet::TShape(Shape3(src_shape[0], src_shape[2], k))),
                              Shape3(0, 2, 1)),
                          Shape1(batch_size * k));
    sel_indices += tcast<int>(indices);
    sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                    Shape3(0, 2, 1));
  } else {
    // sel_indices[b, j] = indices[b, j] + b * element_num.
    Tensor<xpu, 2, IDType> indices =
        inputs[2].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
    sel_indices = reshape(tcast<int>(indices) +
                          broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)),
                                       mxnet::TShape(Shape2(batch_size, k))),
                          Shape1(batch_size * k));
  }
  CHECK_EQ(sel_indices.CheckContiguous(), true);
  if (kWriteTo == req[0] || kAddTo == req[0]) {
    if (kWriteTo == req[0]) {
      in_grad = scalar<DType>(0);
    }
    mxnet_op::Kernel<fill_ind, xpu>::Launch(s, batch_size * k,
                                            sel_indices.dptr_,
                                            out_grad.dptr_,
                                            req[0],
                                            in_grad.dptr_);
  } else {
    LOG(FATAL) << "Not Implemented!";
  }
}
// Backward entry point of `topk`.  Gradients only exist when the forward
// pass produced values (kReturnValue / kReturnBoth).
template<typename xpu>
void TopKBackward_(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  switch (param.ret_typ) {
    case topk_enum::kReturnBoth:
      MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
        MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
          TopKBackwardImpl<xpu, DType, IDType>(ctx, inputs, req, outputs, param);
        });
      });
      break;
    case topk_enum::kReturnValue:
      MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
        TopKBackwardImpl<xpu, DType, int>(ctx, inputs, req, outputs, param);
      });
      break;
    default:
      LOG(FATAL) << "Not Implemented";
      break;
  }
}
// Number of output blobs: mask and indices modes produce one,
// value/both modes produce two (values plus indices).
inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  const bool single_output = (param.ret_typ == topk_enum::kReturnIndices ||
                              param.ret_typ == topk_enum::kReturnMask);
  return single_output ? 1U : 2U;
}
// Number of user-visible outputs: only kReturnBoth exposes two
// (value mode's index output exists but stays hidden).
inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  return (param.ret_typ == topk_enum::kReturnBoth) ? 2U : 1U;
}
// Type inference for TopK.
// - kReturnIndices: the sole output takes the user-selected index dtype.
// - otherwise: out[0] carries values/mask and must agree with the input;
//   out[1] (if present) is int32 for value mode, user-selected for both mode.
inline bool TopKType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  int data_type = -1;
  size_t in_size = in_attrs->size();
  size_t out_size = out_attrs->size();
  CHECK_EQ(in_size, 1);
  CHECK(out_size == 1 || out_size == 2);
  if (out_size > 1) {
    // Second output holds indices.
    if (param.ret_typ == topk_enum::kReturnValue) {
      CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
          << "Failed to set the type of ret_indices.";
    } else {
      CHECK(type_assign(&(*out_attrs)[1], param.dtype))
          << "Failed to set the type of ret_indices.";
    }
  }
  if (param.ret_typ == topk_enum::kReturnIndices) {
    CHECK(type_assign(&(*out_attrs)[0], param.dtype))
        << "Failed to set the type of ret_indices.";
  } else {
    // Unify the value dtype across input and out[0] in both directions so
    // either side can be inferred from the other.
    CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]="
                                                   << (*in_attrs)[0];
    CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]="
                                                    << (*out_attrs)[0];
    CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]="
                                                   << (*in_attrs)[0];
    CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]="
                                                    << (*out_attrs)[0];
    // Still unknown: report failure so inference can retry later.
    if (data_type == -1) return false;
  }
  return true;
}
// Shared output-shape inference for TopK / Sort / ArgSort.
// Every output (one or two, depending on ret_typ) takes the target shape
// computed by ParseTopKParam.
inline bool TopKShapeImpl(const TopKParam& param,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  // Indices-only and mask modes have one output; value/both have two.
  if (param.ret_typ == topk_enum::kReturnIndices ||
      param.ret_typ == topk_enum::kReturnMask) {
    CHECK_EQ(out_attrs->size(), 1U);
  } else {
    CHECK_EQ(out_attrs->size(), 2U);
  }
  mxnet::TShape& in_shape = (*in_attrs)[0];
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  mxnet::TShape target_shape;
  ParseTopKParam(in_shape, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  // All outputs share the same shape; the previous if/else duplicated the
  // assignment to out_attrs[0] in both branches.
  for (size_t i = 0; i < out_attrs->size(); ++i) {
    SHAPE_ASSIGN_CHECK(*out_attrs, i, target_shape);
  }
  return true;
}
// Shape inference for `topk`: forward the operator's own parameters
// straight to the shared helper.
inline bool TopKShape(const nnvm::NodeAttrs& attrs,
                      mxnet::ShapeVector *in_attrs,
                      mxnet::ShapeVector *out_attrs) {
  return TopKShapeImpl(nnvm::get<TopKParam>(attrs.parsed), in_attrs, out_attrs);
}
// Type inference for `sort`: one input; two outputs — sorted values
// (same dtype as the input) and int32 indices.
inline bool SortType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1);
  CHECK_EQ(out_attrs->size(), 2);
  CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
      << "Failed to set the type of ret_indices to int32.";
  // Unify the value dtype across the input and the first output, in both
  // directions, so either side can be inferred from the other.
  int dtype = -1;
  CHECK(type_assign(&dtype, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]="
                                             << (*in_attrs)[0];
  CHECK(type_assign(&dtype, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]="
                                              << (*out_attrs)[0];
  CHECK(type_assign(&(*in_attrs)[0], dtype)) << "Incompatible dtype of input, in_attrs[0]="
                                             << (*in_attrs)[0];
  CHECK(type_assign(&(*out_attrs)[0], dtype)) << "Incompatible dtype of output, out_attrs[0]="
                                              << (*out_attrs)[0];
  return dtype != -1;
}
// Shape inference for `sort`: reuse TopK's shape logic with k covering
// the whole axis and value outputs.
inline bool SortShape(const nnvm::NodeAttrs& attrs,
                      mxnet::ShapeVector *in_attrs,
                      mxnet::ShapeVector *out_attrs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.ret_typ = topk_enum::kReturnValue;
  topk_param.k = 0;  // k <= 0 selects the whole axis
  topk_param.is_ascend = param.is_ascend;
  topk_param.axis = param.axis;
  return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}
// Type inference for `argsort`: the single output holds indices in the
// user-selected dtype.
inline bool ArgSortType(const nnvm::NodeAttrs& attrs,
                        std::vector<int> *in_attrs,
                        std::vector<int> *out_attrs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  // Fixed message: the target type is param.dtype, not necessarily int32
  // (the old text was copy-pasted from SortType).
  CHECK(type_assign(&(*out_attrs)[0], param.dtype))
      << "Failed to set the type of ret_indices.";
  return true;
}
// Shape inference for `argsort`: reuse TopK's shape logic with k covering
// the whole axis and an indices-only output.
inline bool ArgSortShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.ret_typ = topk_enum::kReturnIndices;
  topk_param.k = 0;  // k <= 0 selects the whole axis
  topk_param.is_ascend = param.is_ascend;
  topk_param.axis = param.axis;
  return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
|
l7_setup.c | /*
* Copyright (c) 2011-2019, Triad National Security, LLC.
* All rights Reserved.
*
* CLAMR -- LA-CC-11-094
*
* Copyright 2011-2019. Triad National Security, LLC. This software was produced
* under U.S. Government contract 89233218CNA000001 for Los Alamos National
* Laboratory (LANL), which is operated by Triad National Security, LLC
* for the U.S. Department of Energy. The U.S. Government has rights to use,
* reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR
* TRIAD NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
* ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified
* to produce derivative works, such modified software should be clearly marked,
* so as not to confuse it with the version available from LANL.
*
* Additionally, redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Triad National Security, LLC, Los Alamos
* National Laboratory, LANL, the U.S. Government, nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE TRIAD NATIONAL SECURITY, LLC AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
* NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TRIAD NATIONAL
* SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "l7.h"
#include "l7p.h"
#include <stdlib.h>
#define L7_LOCATION "L7_SETUP"
int L7_Setup(
const int num_base,
const int my_start_index,
const int num_indices_owned,
int *indices_needed,
const int num_indices_needed,
int *l7_id
)
{
/* Purpose
* =======
* L7_Setup is used to setup the update/scatter database as
* defined by the global indexing scheme. Each process passes
* in parameters which define the indices it owns (i.e. as
* defined by 'my_start_index' and 'num_indices_owned') and
* lists the indices it needs ('indices_needed'). From this,
* a database is defined that allows subsequent calls to
* L7_Update.
*
* Notes:
* ======
* 1) Assumes a global indexing set, linearly decomposed across
* all processes.
*
* Arguments
* =========
* num_base (input) const L7_INT
* global indexing set starts with 1 (Fortran)
* or with 0 (C)
*
* my_start_index (input) const L7_INT
* Starting index number of calling process
* in global indexing set.
*
* num_indices_owned (input) const L7_INT
* Number of indices owned by calling process.
*
* indices_needed (input) const L7_INT*
* Array containing indices needed by
* calling process.
*
* num_indices_needed (input) const L7_INT
* Number of indices of interest listed
* in array 'num_indices_needed'.
*
* l7_id (input/output) int*
* Handle to database to be setup.
*
* 0: L7 sets up a new database, and
* assigns it a value.
* > 0: L7 resets existing database with
* input information. That is, it reuses
* the allocated memory.
* < 0: An error is returned.
*
* Notes:
* =====
* 1) The handling of 0-based arrays for C and 1-based arrays for Fortran
* is handled in L7_Setup. This is done by taking the input global
* indices stored in 'indices_global_to_send' and converting them to
* 1-based and storing them in 'indices_local_to_send'.
*
* 2) The indices are handled as 4-byte integers.
*
* 3) Serial compilation creates a no-op.
*
* Program Flow
* ============
* 0) Check input for basic validity.
* 1) Set communication parameters within database.
* 2) Determine processes this pe receives from.
* 3) Determine the number of processes this pe sends to.
* 4) Send number of as well as the indices needed from each sending process.
* 5) Set up array containing the pes this pe sends indices to.
* 6) Set up array containing the indices this pe sends to others.
*/
/*
* Local variables.
*/
int
ierr; /* Error code for return */
#ifdef HAVE_MPI
int
base_adj, /* 0 or 1 based arrays adjustment */
count_total,
i, j, /* Counters */
max_sizeof_type,
num_msgs, /* Number of sends and recvs needed */
numpes, /* Alias for l7_id_db.numpes. */
num_indices_acctd_for,
num_outstanding_requests = 0,
num_sends,
offset,
penum, /* Alias for l7_id_db.penum. */
*pi4_in, /* (int *)l7.receive_buffer */
*pi4_out, /* (int *)l7.send_buffer */
send_buffer_bytes_needed, /* Buffer space requirement. */
start_indices_needed,
this_index; /* Offset into indexing set. */
l7_id_database
*l7_id_db;
MPI_Request
*mpi_request; /* Local alias for l7_id_db->mpi_request. */
MPI_Status
*mpi_status; /* Local alias for l7_id_db->mpi_status. */
#if defined (_L7_DEBUG)
int
k; /* Counter */
#endif
/*
* Executable Statements
*/
if (! l7.mpi_initialized){
return(0);
}
if (l7.initialized != 1){
ierr = -1;
L7_ASSERT( l7.initialized == 1, "L7 not initialized", ierr);
}
/*
* Check input
*/
if (num_base){
base_adj = 1;
}
else {
base_adj = 0;
}
if (my_start_index < 0){
ierr = -1;
L7_ASSERT( my_start_index >= 0, "my_start_index < 0", ierr);
}
if (num_indices_owned < 0){
ierr = -1;
L7_ASSERT( num_indices_owned >= 0, "num_indices_owned < 0", ierr);
}
if (num_indices_needed > 0){
if (indices_needed == NULL){
ierr = -1;
L7_ASSERT( (int *)indices_needed != NULL,
"indices_needed == NULL", ierr);
}
}
if (*l7_id < 0){
ierr = *l7_id;
L7_ASSERT( *l7_id >=0,
"L7 Id must be either 0 (new id) or > 0 (existing id)",
ierr);
}
/*
* Setup database structure.
*/
if (*l7_id != 0){
/*
* Find it in the database and update based on new input.
*/
if (l7.first_db == NULL){
L7_ASSERT(l7.first_db != NULL,
"Uninitialized l7_id input, but no ids in database",
ierr);
}
l7_id_db = l7.first_db;
while (l7_id_db){
if (l7_id_db->l7_id == *l7_id)
break;
l7_id_db = l7_id_db->next_db;
}
if (l7.first_db == NULL){
ierr = -1;
L7_ASSERT( l7.first_db != NULL,
"Uninitialized l7_id input, but not found in this list",
ierr);
}
}
else{
/*
* Allocate new database, insert into linked list.
*/
if (l7.num_dbs >= L7_MAX_NUM_DBS){
ierr = -1;
L7_ASSERT(l7.num_dbs < L7_MAX_NUM_DBS,
"Too many L7 databases allocataed",
ierr);
}
l7_id_db = (l7_id_database*)calloc(1L, sizeof(l7_id_database) );
if (l7_id_db == NULL){
ierr = -1;
L7_ASSERT( l7_id_db != NULL, "Failed to allocate new database",
ierr);
}
if ( !(l7.first_db) ){
l7.first_db = l7_id_db;
l7.last_db = l7_id_db;
l7_id_db->next_db = NULL; /* Paranoia */
l7_id_db->l7_id = 1;
l7.num_dbs = 1;
}
else{
/*
* Assign a l7_id.
*/
l7_id_db->l7_id = l7.last_db->l7_id + 1;
/*
* Reset links.
*/
l7.last_db->next_db = l7_id_db;
l7.last_db = l7_id_db;
l7.num_dbs++;
}
*l7_id = l7_id_db->l7_id;
/*
* Initialize some parameters.
*/
l7_id_db->recv_counts_len = 0;
l7_id_db->recv_from_len = 0;
l7_id_db->send_to_len = 0;
l7_id_db->send_counts_len = 0;
l7_id_db->indices_to_send_len = 0;
l7_id_db->mpi_request_len = 0;
l7_id_db->mpi_status_len = 0;
}
/*
* Store input in database.
*/
l7_id_db->my_start_index = my_start_index;
l7_id_db->num_indices_owned = num_indices_owned;
if ( (l7_id_db->indices_needed_len < num_indices_needed ) &&
(num_indices_needed > 0) ){
if (l7_id_db->indices_needed)
free(l7_id_db->indices_needed);
l7_id_db->indices_needed =
(int *)calloc((unsigned long long)num_indices_needed, sizeof(int) );
if (l7_id_db->indices_needed == NULL){
ierr = -1;
L7_ASSERT( (int*)(l7_id_db->indices_needed) != NULL,
"Memory failure for indices_needed",
ierr);
}
l7_id_db->indices_needed_len = num_indices_needed;
}
#ifdef _OPENMP
#pragma omp parallel for
#else
#ifdef _OPENMP_SIMD
#pragma omp simd
#endif
#endif
for (i=0; i<num_indices_needed; i++){
l7_id_db->indices_needed[i] = indices_needed[i];
}
l7_id_db->num_indices_needed = num_indices_needed;
ierr = MPI_Comm_rank (MPI_COMM_WORLD, &l7_id_db->penum );
L7_ASSERT( ierr == MPI_SUCCESS, "MPI_Comm_rank", ierr);
ierr = MPI_Comm_size (MPI_COMM_WORLD, &l7_id_db->numpes );
L7_ASSERT( ierr == MPI_SUCCESS, "MPI_Comm_size", ierr);
l7.penum = l7_id_db->penum;
/* Local shorthand */
numpes = l7_id_db->numpes;
penum = l7_id_db->penum;
if (numpes == 1){
return(0);
}
/*
* Create array containing starting (global) index numbers
* for all processes.
*
* 1) Allgather num_indices_owned.
* 2) Scan to create starting_index.
* 3) Shift all array elements up 1 position.
* 4) Set starting_indices[0] = 0.
*
* The latter two steps allows arrays to be used as below.
*/
l7_id_db->starting_indices =
(int *)calloc((unsigned long long)(numpes+1), sizeof(int));
if(l7_id_db->starting_indices == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->starting_indices != NULL,
"No memory for l7_id_db->starting_indices", ierr);
}
ierr = MPI_Allgather( &(l7_id_db->num_indices_owned), 1, MPI_INT,
&(l7_id_db->starting_indices[1]), 1, MPI_INT,
MPI_COMM_WORLD);
L7_ASSERT( ierr == MPI_SUCCESS, "MPI_Allgather (num_indices_owned)",
ierr);
l7_id_db->starting_indices[0] = 0;
// l7_id_db->starting_indices[0] = 1;
for (i=0; i<numpes; i++)
l7_id_db->starting_indices[i+1] += l7_id_db->starting_indices[i];
/*
* Determine the number of processes this pe receives from.
*/
l7_id_db->num_recvs = 0;
start_indices_needed = -1;
this_index = 0;
if (num_indices_needed > 0){
for (j=0; j<numpes; j++){
if ( indices_needed[this_index] >= l7_id_db->starting_indices[j]){
if (indices_needed[this_index] < l7_id_db->starting_indices[j+1]){
l7_id_db->num_recvs++;
#if defined _L7_DEBUG
printf("[pe %d] Found first one on pe %d. \n", penum, j);
#endif
/* Skip through all the rest on pe j. */
/* SKG - Update order to silence valgrind. Don't know if
* this is okay... */
while ( ( this_index < num_indices_needed) &&
( indices_needed[this_index] < l7_id_db->starting_indices[j+1] ) )
this_index++;
/* Remember where we found the first one. */
if ( start_indices_needed == -1)
start_indices_needed = j;
if (this_index == num_indices_needed)
break;
}
}
}
if (l7_id_db->num_recvs == 0){
ierr = -1;
L7_ASSERT(l7_id_db->num_recvs != 0, "No indices found", ierr);
}
}
if (this_index != num_indices_needed){
printf("[pe %d] ERROR -- can't find all the indices I need. I have %d, need %d\n",
penum, this_index, num_indices_needed);
}
#if defined _L7_DEBUG
printf("[pe %d] l7_id_dp->num_recvs = %d\n",
penum, l7_id_db->num_recvs);
#endif
/*
* Allocate space for counts for each pe sending to this one.
*/
if (l7_id_db->num_recvs > l7_id_db->recv_counts_len){
if (l7_id_db->recv_counts)
free(l7_id_db->recv_counts);
l7_id_db->recv_counts =
(int *)calloc((unsigned long long)l7_id_db->num_recvs, sizeof(int) );
if (l7_id_db->recv_counts == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->recv_counts != NULL,
"No space for l7_id_db->recv_counts", ierr);
}
l7_id_db->recv_counts_len = l7_id_db->num_recvs;
int num_recvs = l7_id_db->num_recvs; // for vectorization
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i=0; i<num_recvs; i++)
l7_id_db->recv_counts[i] = 0; /* calloc does not guarantee = 0. */
}
if (l7_id_db->num_recvs > l7_id_db->recv_from_len){
if (l7_id_db->recv_from)
free(l7_id_db->recv_from);
l7_id_db->recv_from =
(int *)calloc((unsigned long long)l7_id_db->num_recvs, sizeof(int) );
if (l7_id_db->recv_from == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->recv_from != NULL,
"No space for l7_id_db->recv_from", ierr);
}
l7_id_db->recv_from_len = l7_id_db->num_recvs;
int num_recvs = l7_id_db->num_recvs; // for vectorization
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i=0; i<num_recvs; i++)
l7_id_db->recv_from[i] = -999;
}
/*
* Determine process and the number of indices this pe recvs from it.
*/
if (num_indices_needed > 0){
this_index = 0;
num_indices_acctd_for = 0;
i=0;
for (j=start_indices_needed; j<numpes; j++){
if (indices_needed[this_index] >= l7_id_db->starting_indices[j] ){
if (indices_needed[this_index] < l7_id_db->starting_indices[j+1]){
/* Found the first one on pe j. */
l7_id_db->recv_from[i] = j;
l7_id_db->recv_counts[i] = 1;
num_indices_acctd_for++;
if (num_indices_acctd_for == num_indices_needed)
break;
this_index++;
/* SKG - Update order to silence valgrind. Don't know if
* this is okay... */
while ( ( num_indices_acctd_for < num_indices_needed ) &&
( indices_needed[this_index] < l7_id_db->starting_indices[j+1] )) {
/* Find the rest on pe j. */
l7_id_db->recv_counts[i]++;
this_index++;
num_indices_acctd_for++;
}
if (num_indices_acctd_for == num_indices_needed)
break;
i++;
}
}
}
if (num_indices_needed != num_indices_acctd_for){
ierr = -1;
L7_ASSERT(num_indices_needed == num_indices_acctd_for,
"Failed to find all the needed indices", ierr);
}
}
/*
* Determine number of processes for which this pe owns indices
* those pes need. This is done use a reduction (MPI_Allreduce).
*/
if (l7.sizeof_send_buffer < 2 * numpes * (int)sizeof(int)){
if (l7.send_buffer)
free(l7.send_buffer);
l7.send_buffer = calloc ((unsigned long long)(2*numpes), sizeof(int));
if (l7.send_buffer == NULL){
ierr = -1;
L7_ASSERT(l7.send_buffer != NULL, "No memory for send buffer", ierr);
}
l7.sizeof_send_buffer = 2 * numpes * (int)sizeof(int);
}
pi4_in = (int*)l7.send_buffer;
pi4_out = &pi4_in[numpes];
for (i=0; i<numpes; i++)
pi4_in[i] = 0;
for (i=0; i<l7_id_db->num_recvs; i++)
pi4_in[l7_id_db->recv_from[i]] = 1;
ierr = MPI_Allreduce(pi4_in, pi4_out, numpes, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Allreduce ( l7_id_db->recv_from )", ierr);
l7_id_db->num_sends = pi4_out[penum];
#if defined _L7_DEBUG
printf("[pe %d] l7_id_db->num_sends = %d \n", penum, l7_id_db->num_sends);
#endif
/*
* Allocate request and status arrays.
*/
num_msgs = ( 2 * l7_id_db->num_recvs ) + l7_id_db->num_sends;
/* Ensure enough outstanding messages for L7_Update_pack model. */
if (num_msgs < (L7_MIN_MPI_REQS * l7_id_db->num_recvs ) )
num_msgs = L7_MIN_MPI_REQS * l7_id_db->num_recvs;
if (num_msgs > l7_id_db->mpi_request_len) {
if (l7_id_db->mpi_request)
free(l7_id_db->mpi_request);
l7_id_db->mpi_request = (MPI_Request *) calloc ((unsigned long long)num_msgs, sizeof(MPI_Request));
if (l7_id_db->mpi_request == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->mpi_request != NULL,
"Allocation of l7_id_db->mpi_request failed", ierr);
}
l7_id_db->mpi_request_len = num_msgs;
}
if (num_msgs > l7_id_db->mpi_status_len){
if (l7_id_db->mpi_status)
free(l7_id_db->mpi_status);
l7_id_db->mpi_status = (MPI_Status *) calloc((unsigned long long)num_msgs, sizeof(MPI_Status) );
if (l7_id_db->mpi_status == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->mpi_status != NULL,
"Allocation of l7_id_db->mpi_status failed", ierr);
}
l7_id_db->mpi_status_len = num_msgs;
}
/* Local shorthand */
mpi_request = l7_id_db->mpi_request;
mpi_status = l7_id_db->mpi_status;
/*
* Send number of indices needed from each sending process.
*/
num_outstanding_requests = 0;
for (i=0; i<l7_id_db->num_recvs; i++){
#if defined _L7_DEBUG
printf("[pe %d] recv_counts[%d] = %d to pe %d \n", penum, i,
l7_id_db->recv_counts[i], l7_id_db->recv_from[i] );
#endif
ierr = MPI_Isend(&l7_id_db->recv_counts[i], 1, MPI_INT,
l7_id_db->recv_from[i], L7_SETUP_SEND_COUNT_TAG,
MPI_COMM_WORLD, &mpi_request[num_outstanding_requests++] );
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Isend (recv_counts[i] )",
ierr);
}
/*
* Receive counts for the processes to which this pe sends.
* This pe doesn't know who needs what it has, so we must
* use wildcard receives.
*/
if (l7_id_db->num_sends > l7_id_db->send_counts_len){
if (l7_id_db->send_counts)
free(l7_id_db->send_counts);
l7_id_db->send_counts = (int *) calloc((unsigned long long)l7_id_db->num_sends, sizeof(int) );
if (l7_id_db->send_counts == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->send_counts != NULL,
"Failed to allocate l7_id_db->send_counts", ierr);
}
l7_id_db->send_counts_len = l7_id_db->num_sends;
}
if (l7_id_db->num_sends > l7_id_db->send_to_len){
if (l7_id_db->send_to)
free(l7_id_db->send_to);
l7_id_db->send_to = (int *) calloc((unsigned long long)l7_id_db->num_sends, sizeof(int) );
if (l7_id_db->send_to == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->send_to != NULL,
"Failed to allocate l7_id_db->send_to", ierr);
}
l7_id_db->send_to_len = l7_id_db->num_sends;
}
for (i=0; i<l7_id_db->num_sends; i++){
ierr = MPI_Irecv(&l7_id_db->send_counts[i], 1, MPI_INT,
MPI_ANY_SOURCE, L7_SETUP_SEND_COUNT_TAG, MPI_COMM_WORLD,
&mpi_request[num_outstanding_requests++] );
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Irecv ( indices_needed[i] )", ierr);
}
if (num_outstanding_requests > 0){
ierr = MPI_Waitall(num_outstanding_requests, mpi_request, mpi_status);
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Waitall ( counts )", ierr);
}
num_outstanding_requests = 0;
/*
* Determine which processes sent the above messages.
* These are the 'send_to' processes.
*/
offset = l7_id_db->num_recvs;
for (i=0; i<l7_id_db->num_sends; i++){
l7_id_db->send_to[i] = mpi_status[offset+i].MPI_SOURCE;
}
/*
* Allocate space for 'indices_global_to_send' and
* 'indices_local_to_send'.
*/
count_total = 0;
for (i=0; i<l7_id_db->num_sends; i++){
count_total += l7_id_db->send_counts[i];
}
if (count_total > l7_id_db->indices_to_send_len){
if (l7_id_db->indices_global_to_send)
free(l7_id_db->indices_global_to_send);
l7_id_db->indices_global_to_send = (int *) calloc((unsigned long long)count_total, sizeof(int) );
if (l7_id_db->indices_global_to_send == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->indices_global_to_send != NULL,
"No memory for l7_id_db->indices_global_to_send.", ierr);
}
if (l7_id_db->indices_local_to_send)
free(l7_id_db->indices_local_to_send);
l7_id_db->indices_local_to_send = (int *) calloc((unsigned long long)count_total, sizeof(int) );
if (l7_id_db->indices_local_to_send == NULL){
ierr = -1;
L7_ASSERT(l7_id_db->indices_local_to_send != NULL,
"No memory for l7_id_db->indices_local_to_send.", ierr);
}
l7_id_db->indices_to_send_len = count_total;
}
/*
* Send (global) indices needed from each sending process.
*/
offset = 0;
for (i=0; i<l7_id_db->num_recvs; i++){
#if defined _L7_DEBUG
printf("[pe %d] Sending %d indices to pe %d. \n",
penum, l7_id_db->recv_counts[i], l7_id_db->recv_from[i] );
for (k=offset; k<offset+l7_id_db->recv_counts[i]; k++){
printf(" index[%d] = %d \n", k, l7_id_db->indices_needed[k] );
}
#endif
ierr = MPI_Isend(&l7_id_db->indices_needed[offset],
l7_id_db->recv_counts[i], MPI_INT,
l7_id_db->recv_from[i], L7_SETUP_INDICES_NEEDED_TAG,
MPI_COMM_WORLD, &mpi_request[num_outstanding_requests++] );
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Isend ( indices_needed[i] )", ierr);
offset+=l7_id_db->recv_counts[i];
}
/*
* Receive (global) indices needed by the pes to which this pe sends.
* Note that these receives are from expected sources.
*/
offset = 0;
for (i=0; i<l7_id_db->num_sends; i++){
ierr = MPI_Irecv(&l7_id_db->indices_global_to_send[offset],
l7_id_db->send_counts[i], MPI_INT,
l7_id_db->send_to[i], L7_SETUP_INDICES_NEEDED_TAG,
MPI_COMM_WORLD, &mpi_request[num_outstanding_requests++] );
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Irecv ( indices_global_to_send )", ierr);
offset += l7_id_db->send_counts[i];
}
/*
* Complete indices communication.
*/
if (num_outstanding_requests > 0){
ierr = MPI_Waitall(num_outstanding_requests, mpi_request, mpi_status );
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Waitall ( indices )", ierr);
}
#if defined _L7_DEBUG
ierr = MPI_Barrier(MPI_COMM_WORLD);
offset = 0;
for (j=0; j<numpes; j++){
if (penum == j){
for (i=0; i<l7_id_db->num_sends; i++){
printf("[pe %d] Recvd %d indices from pe %d. \n", penum,
l7_id_db->send_counts[i], l7_id_db->send_to[i] );
for (k=offset; k<offset+l7_id_db->send_counts[i]; k++){
printf(" index[%d] = %d \n",k l7_id_db->indices_global_to_send[k] );
}
offset += l7_id_db->send_counts[i];
}
}
sleep(1);
}
#endif
/* Create array of local indices corresponding to
* array of global indices requested. Note the
* conversion from 1-based indices to 0-based is
* accomplished here. (See note in header).
*/
offset = 0;
for (i=0; i<l7_id_db->num_sends; i++){
int counts = l7_id_db->send_counts[i]; // for vectorization
int adj = (int)(my_start_index) - base_adj; // for vectorization
#ifdef _OPENMP
#pragma omp parallel for
#else
#ifdef _OPENMP_SIMD
#pragma omp simd
#endif
#endif
for (j=0; j<counts; j++){
l7_id_db->indices_local_to_send[offset+j] =
l7_id_db->indices_global_to_send[offset+j] - adj;
}
offset += counts;
}
#if defined _L7_DEBUG
ierr = MPI_Barrier(MPI_COMM_WORLD);
for (i=0; i<numpes; i++){
if (penum == i){
for (j=0; j<l7_id_db->num_sends; j++){
printf("[pe %d] send %d indices to pe %d \n", penum,
l7_id_db->send_counts[j], l7_id_db->send_to[] );
ierr = MPI_Barrier(MPI_COMM_WORLD);
}
}
}
flush(stdout);
ierr = MPI_Barrier(MPI_COMM_WORLD);
L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Barrier failure", ierr);
for (i=0; i<numpes; i++){
if (penum == i){
printf("----------------------------------------------------\n")
for (j=0; j<l7_id_db->num_sends; j++){
printf("[pe %d] Send (index %d) to pe %d. \n",penum,
l7_id_db->indices_global_to_send[j], l7_id_db->send_to[j] );
}
for (j=0; j<l7_id_db->num_recvs; j++){
printf("[pe %d] Recving (index %d) from pe %d. \n",penum,
l7_id_db->indices_needed[j], l7_id_db->recv_from[j] );
}
printf("----------------------------------------------------\n")
fflush(stdout);
}
sleep(2);
}
#endif /* _L7_DEBUG */
/*
* Ensure buffer available for data to be sent.
*/
send_buffer_bytes_needed = 0;
num_sends = l7_id_db->num_sends;
max_sizeof_type = sizeof(double);
for (i=0; i<num_sends; i++)
send_buffer_bytes_needed += l7_id_db->send_counts[i] * max_sizeof_type;
if (send_buffer_bytes_needed > l7.sizeof_send_buffer ){
if (l7.send_buffer)
free(l7.send_buffer);
l7.send_buffer = (char *)calloc((unsigned long long)send_buffer_bytes_needed, sizeof (char) );
if (l7.send_buffer == NULL){
ierr = -1;
L7_ASSERT(l7.send_buffer != NULL, "No memory for send buffer", ierr);
}
l7.sizeof_send_buffer = send_buffer_bytes_needed;
}
/*
* Message tag management
*/
l7_id_db->this_tag_update = L7_UPDATE_TAGS_MIN;
/*
* Database is setup for this l7_id -- return.
*/
#endif /* HAVE_MPI */
ierr = L7_OK;
return(ierr);
} /* End L7_Setup */
/*
 * L7_SETUP
 *
 * Fortran-callable binding for L7_Setup.  Fortran passes every argument
 * by reference, so the scalar inputs arrive as pointers; this shim
 * dereferences them into locals and forwards to the C entry point with a
 * base adjustment of 0 (Fortran-style 1-based indices -- presumably; see
 * the base_adj handling in L7_Setup itself).
 *
 * l7_id is in/out: 0 on input requests a new database id, non-zero
 * updates an existing one.
 */
void L7_SETUP(
        const int *my_start_index,
        const int *num_indices_owned,
        int *indices_needed,
        const int *num_indices_needed,
        int *l7_id
        )
{
    const int start_index  = *my_start_index;
    const int owned_count  = *num_indices_owned;
    const int needed_count = *num_indices_needed;

    L7_Setup(0, start_index, owned_count, indices_needed, needed_count, l7_id);
}
|
requantize_relu_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Fused requantize + ReLU for pack4 int32 activations on ARM NEON.
//
// Each int32 value v is converted to float, multiplied by the folded
// per-channel scale (scale_in * scale_out), optionally offset by the
// folded bias (bias * scale_out), clamped at zero (ReLU) and saturated
// to int8 by float2int8relu / the asm vqmovn+vmax sequences.
//
// bottom_blob       : int32 input, elempack = 4
// top_blob          : int8 output, pre-allocated; elempack 8 or 1 decides
//                     the layout-repacking strategy below
// scale_in_data     : per-channel scales, or a single broadcast value (w == 1)
// scale_out_data    : per-channel scales, or a single broadcast value (w == 1)
// bias_data         : optional bias; w == 0 means "no bias"
// opt               : thread-count source for the OpenMP loops
static void requantize_relu_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int size = w * h; // elements per channel (per pack lane)
    int outc = top_blob.c;
    int out_elempack = top_blob.elempack;

    int scale_in_data_size = scale_in_data.w;
    int scale_out_data_size = scale_out_data.w;
    int bias_data_size = bias_data.w;

    // Algebraic folding used throughout (constants are precomputed per channel):
    // int8(relu(v * scale_in) * scale_out)
    // int8_relu(v * (scale_in * scale_out))
    // int8(relu(v * scale_in + bias) * scale_out)
    // int8_relu(v * (scale_in * scale_out) + (bias * scale_out))

    if (out_elempack == 8)
    {
        // pack4 -> pack8: two consecutive pack4 input channels (q*2, q*2+1)
        // are interleaved into one pack8 output channel q.
        if (bias_data_size == 0)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < outc; q++)
            {
                const int* intptr0 = bottom_blob.channel(q * 2);
                const int* intptr1 = bottom_blob.channel(q * 2 + 1);
                signed char* ptr = top_blob.channel(q);

                // Broadcast a single scale or load 8 per-channel lanes (4 + 4).
                float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
                float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
                float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
                float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);

                float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
                float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);

                int i = 0;
#if __aarch64__
                // 4-pixel unrolled main loop (aarch64 only; armv7 uses the
                // scalar-tail loop below for everything).
                for (; i + 3 < size; i += 4)
                {
                    float32x4_t _v00 = vcvtq_f32_s32(vld1q_s32(intptr0));
                    float32x4_t _v01 = vcvtq_f32_s32(vld1q_s32(intptr0 + 4));
                    float32x4_t _v02 = vcvtq_f32_s32(vld1q_s32(intptr0 + 8));
                    float32x4_t _v03 = vcvtq_f32_s32(vld1q_s32(intptr0 + 12));
                    float32x4_t _v10 = vcvtq_f32_s32(vld1q_s32(intptr1));
                    float32x4_t _v11 = vcvtq_f32_s32(vld1q_s32(intptr1 + 4));
                    float32x4_t _v12 = vcvtq_f32_s32(vld1q_s32(intptr1 + 8));
                    float32x4_t _v13 = vcvtq_f32_s32(vld1q_s32(intptr1 + 12));
                    _v00 = vmulq_f32(_v00, _scale0);
                    _v01 = vmulq_f32(_v01, _scale0);
                    _v02 = vmulq_f32(_v02, _scale0);
                    _v03 = vmulq_f32(_v03, _scale0);
                    _v10 = vmulq_f32(_v10, _scale1);
                    _v11 = vmulq_f32(_v11, _scale1);
                    _v12 = vmulq_f32(_v12, _scale1);
                    _v13 = vmulq_f32(_v13, _scale1);
                    // float2int8relu fuses ReLU + saturating narrow to int8,
                    // interleaving the two pack4 halves into one pack8 store.
                    vst1_s8(ptr, float2int8relu(_v00, _v10));
                    vst1_s8(ptr + 8, float2int8relu(_v01, _v11));
                    vst1_s8(ptr + 16, float2int8relu(_v02, _v12));
                    vst1_s8(ptr + 24, float2int8relu(_v03, _v13));

                    intptr0 += 16;
                    intptr1 += 16;
                    ptr += 32;
                }
#endif // __aarch64__
                // One pixel (8 int8 lanes) per iteration.
                for (; i < size; i++)
                {
                    float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr0));
                    float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr1));
                    _v0 = vmulq_f32(_v0, _scale0);
                    _v1 = vmulq_f32(_v1, _scale1);
                    vst1_s8(ptr, float2int8relu(_v0, _v1));

                    intptr0 += 4;
                    intptr1 += 4;
                    ptr += 8;
                }
            }
        }
        else
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < outc; q++)
            {
                const int* intptr0 = bottom_blob.channel(q * 2);
                const int* intptr1 = bottom_blob.channel(q * 2 + 1);
                signed char* ptr = top_blob.channel(q);

                float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
                float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
                float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
                float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);
                float32x4_t _bias0 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8);
                float32x4_t _bias1 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8 + 4);

                // Fold scale_out into both the scale and the bias (see the
                // identities at the top of the function).
                float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
                float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
                _bias0 = vmulq_f32(_bias0, _scale_out0);
                _bias1 = vmulq_f32(_bias1, _scale_out1);

                int i = 0;
#if __aarch64__
                // 4-pixel unrolled loop using fused multiply-add (vfmaq).
                for (; i + 3 < size; i += 4)
                {
                    float32x4_t _v00 = vcvtq_f32_s32(vld1q_s32(intptr0));
                    float32x4_t _v01 = vcvtq_f32_s32(vld1q_s32(intptr0 + 4));
                    float32x4_t _v02 = vcvtq_f32_s32(vld1q_s32(intptr0 + 8));
                    float32x4_t _v03 = vcvtq_f32_s32(vld1q_s32(intptr0 + 12));
                    float32x4_t _v10 = vcvtq_f32_s32(vld1q_s32(intptr1));
                    float32x4_t _v11 = vcvtq_f32_s32(vld1q_s32(intptr1 + 4));
                    float32x4_t _v12 = vcvtq_f32_s32(vld1q_s32(intptr1 + 8));
                    float32x4_t _v13 = vcvtq_f32_s32(vld1q_s32(intptr1 + 12));
                    _v00 = vfmaq_f32(_bias0, _v00, _scale0);
                    _v01 = vfmaq_f32(_bias0, _v01, _scale0);
                    _v02 = vfmaq_f32(_bias0, _v02, _scale0);
                    _v03 = vfmaq_f32(_bias0, _v03, _scale0);
                    _v10 = vfmaq_f32(_bias1, _v10, _scale1);
                    _v11 = vfmaq_f32(_bias1, _v11, _scale1);
                    _v12 = vfmaq_f32(_bias1, _v12, _scale1);
                    _v13 = vfmaq_f32(_bias1, _v13, _scale1);
                    vst1_s8(ptr, float2int8relu(_v00, _v10));
                    vst1_s8(ptr + 8, float2int8relu(_v01, _v11));
                    vst1_s8(ptr + 16, float2int8relu(_v02, _v12));
                    vst1_s8(ptr + 24, float2int8relu(_v03, _v13));

                    intptr0 += 16;
                    intptr1 += 16;
                    ptr += 32;
                }
#endif // __aarch64__
                // 2-pixel loop: intrinsics on aarch64, hand-written asm on
                // armv7 (the asm keeps all 16 floats in q-registers at once).
                for (; i + 1 < size; i += 2)
                {
#if __aarch64__
                    float32x4_t _v00 = vcvtq_f32_s32(vld1q_s32(intptr0));
                    float32x4_t _v01 = vcvtq_f32_s32(vld1q_s32(intptr0 + 4));
                    float32x4_t _v10 = vcvtq_f32_s32(vld1q_s32(intptr1));
                    float32x4_t _v11 = vcvtq_f32_s32(vld1q_s32(intptr1 + 4));
                    _v00 = vfmaq_f32(_bias0, _v00, _scale0);
                    _v01 = vfmaq_f32(_bias0, _v01, _scale0);
                    _v10 = vfmaq_f32(_bias1, _v10, _scale1);
                    _v11 = vfmaq_f32(_bias1, _v11, _scale1);
                    vst1_s8(ptr, float2int8relu(_v00, _v10));
                    vst1_s8(ptr + 8, float2int8relu(_v01, _v11));

                    intptr0 += 8;
                    intptr1 += 8;
                    ptr += 16;
#else  // __aarch64__
                    // armv7 path: bias-seeded vmla, round-to-nearest via
                    // vcvtr per scalar lane, saturate s32->s16->s8 with
                    // vqmovn, then ReLU via vmax against the zeroed q8.
                    asm volatile(
                        "pld        [%0, #256]      \n"
                        "vld1.s32   {d8-d11}, [%0 :128]! \n"
                        "pld        [%1, #256]      \n"
                        "vld1.s32   {d12-d15}, [%1 :128]! \n"
                        "vmov       q0, %q8         \n"
                        "vmov       q1, %q8         \n"
                        "vmov       q2, %q9         \n"
                        "vmov       q3, %q9         \n"
                        "vcvt.f32.s32 q4, q4        \n"
                        "vcvt.f32.s32 q5, q5        \n"
                        "vcvt.f32.s32 q6, q6        \n"
                        "vcvt.f32.s32 q7, q7        \n"
                        "veor       q8, q8          \n" // _zero
                        "vmla.f32   q0, q4, %q6     \n"
                        "vmla.f32   q1, q5, %q6     \n"
                        "vmla.f32   q2, q6, %q7     \n"
                        "vmla.f32   q3, q7, %q7     \n"
                        "vcvtr.s32.f32 s0, s0       \n"
                        "vcvtr.s32.f32 s1, s1       \n"
                        "vcvtr.s32.f32 s2, s2       \n"
                        "vcvtr.s32.f32 s3, s3       \n"
                        "vcvtr.s32.f32 s4, s4       \n"
                        "vcvtr.s32.f32 s5, s5       \n"
                        "vcvtr.s32.f32 s6, s6       \n"
                        "vcvtr.s32.f32 s7, s7       \n"
                        "vcvtr.s32.f32 s8, s8       \n"
                        "vcvtr.s32.f32 s9, s9       \n"
                        "vcvtr.s32.f32 s10, s10     \n"
                        "vcvtr.s32.f32 s11, s11     \n"
                        "vcvtr.s32.f32 s12, s12     \n"
                        "vcvtr.s32.f32 s13, s13     \n"
                        "vcvtr.s32.f32 s14, s14     \n"
                        "vcvtr.s32.f32 s15, s15     \n"
                        "vqmovn.s32 d8, q0          \n"
                        "vqmovn.s32 d10, q1         \n"
                        "vqmovn.s32 d9, q2          \n"
                        "vqmovn.s32 d11, q3         \n"
                        "vqmovn.s16 d8, q4          \n"
                        "vqmovn.s16 d9, q5          \n"
                        "vmax.s8    q4, q4, q8      \n"
                        "vst1.s8    {d8-d9}, [%2 :64]! \n"
                        : "=r"(intptr0),
                        "=r"(intptr1),
                        "=r"(ptr)
                        : "0"(intptr0),
                        "1"(intptr1),
                        "2"(ptr),
                        "w"(_scale0), // %6
                        "w"(_scale1), // %7
                        "w"(_bias0),  // %8
                        "w"(_bias1)   // %9
                        : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8");
#endif // __aarch64__
                }
                // 1-pixel tail.
                for (; i < size; i++)
                {
#if __aarch64__
                    float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr0));
                    float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr1));
                    _v0 = vmlaq_f32(_bias0, _v0, _scale0);
                    _v1 = vmlaq_f32(_bias1, _v1, _scale1);
                    vst1_s8(ptr, float2int8relu(_v0, _v1));

                    intptr0 += 4;
                    intptr1 += 4;
                    ptr += 8;
#else  // __aarch64__
                    // Single-pixel asm variant of the same convert/scale/
                    // round/saturate/ReLU pipeline.
                    asm volatile(
                        "pld        [%0, #128]      \n"
                        "vld1.s32   {d4-d5}, [%0 :128]! \n"
                        "pld        [%1, #128]      \n"
                        "vld1.s32   {d6-d7}, [%1 :128]! \n"
                        "vmov       q0, %q8         \n"
                        "vmov       q1, %q9         \n"
                        "vcvt.f32.s32 q2, q2        \n"
                        "vcvt.f32.s32 q3, q3        \n"
                        "veor       d8, d8          \n" // _zero
                        "vmla.f32   q0, q2, %q6     \n"
                        "vmla.f32   q1, q3, %q7     \n"
                        "vcvtr.s32.f32 s0, s0       \n"
                        "vcvtr.s32.f32 s1, s1       \n"
                        "vcvtr.s32.f32 s2, s2       \n"
                        "vcvtr.s32.f32 s3, s3       \n"
                        "vcvtr.s32.f32 s4, s4       \n"
                        "vcvtr.s32.f32 s5, s5       \n"
                        "vcvtr.s32.f32 s6, s6       \n"
                        "vcvtr.s32.f32 s7, s7       \n"
                        "vqmovn.s32 d4, q0          \n"
                        "vqmovn.s32 d5, q1          \n"
                        "vqmovn.s16 d4, q2          \n"
                        "vmax.s8    d4, d4, d8      \n"
                        "vst1.s8    {d4}, [%2 :64]! \n"
                        : "=r"(intptr0),
                        "=r"(intptr1),
                        "=r"(ptr)
                        : "0"(intptr0),
                        "1"(intptr1),
                        "2"(ptr),
                        "w"(_scale0), // %6
                        "w"(_scale1), // %7
                        "w"(_bias0),  // %8
                        "w"(_bias1)   // %9
                        : "memory", "q0", "q1", "q2", "q3", "q4");
#endif // __aarch64__
                }
            }
        }
    }

    if (out_elempack == 1)
    {
        // pack4 -> pack1: each pack4 input channel q scatters its four
        // lanes into four consecutive plain output channels (q*4 .. q*4+3).
        if (bias_data_size == 0)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                const int* intptr = bottom_blob.channel(q);
                signed char* ptr0 = top_blob.channel(q * 4);
                signed char* ptr1 = top_blob.channel(q * 4 + 1);
                signed char* ptr2 = top_blob.channel(q * 4 + 2);
                signed char* ptr3 = top_blob.channel(q * 4 + 3);

                float32x4_t _scale_in = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 4);
                float32x4_t _scale_out = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 4);

                float32x4_t _scale = vmulq_f32(_scale_in, _scale_out);

                int i = 0;
                for (; i < size; i++)
                {
                    float32x4_t _v = vcvtq_f32_s32(vld1q_s32(intptr));
                    _v = vmulq_f32(_v, _scale);
                    // Duplicate the vector into both halves; only lanes 0-3
                    // of the int8 result are used, one per output channel.
                    int8x8_t v = float2int8relu(_v, _v);
                    ptr0[0] = vget_lane_s8(v, 0);
                    ptr1[0] = vget_lane_s8(v, 1);
                    ptr2[0] = vget_lane_s8(v, 2);
                    ptr3[0] = vget_lane_s8(v, 3);

                    intptr += 4;
                    ptr0 += 1;
                    ptr1 += 1;
                    ptr2 += 1;
                    ptr3 += 1;
                }
            }
        }
        else
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                const int* intptr = bottom_blob.channel(q);
                signed char* ptr0 = top_blob.channel(q * 4);
                signed char* ptr1 = top_blob.channel(q * 4 + 1);
                signed char* ptr2 = top_blob.channel(q * 4 + 2);
                signed char* ptr3 = top_blob.channel(q * 4 + 3);

                float32x4_t _scale_in = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 4);
                float32x4_t _scale_out = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 4);
                float32x4_t _bias = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 4);

                // Fold scale_out into scale and bias (identities at top).
                float32x4_t _scale = vmulq_f32(_scale_in, _scale_out);
                _bias = vmulq_f32(_bias, _scale_out);

                int i = 0;
                for (; i < size; i++)
                {
                    float32x4_t _v = vcvtq_f32_s32(vld1q_s32(intptr));
#if __aarch64__
                    _v = vfmaq_f32(_bias, _v, _scale);
#else
                    _v = vmlaq_f32(_bias, _v, _scale);
#endif
                    int8x8_t v = float2int8relu(_v, _v);
                    ptr0[0] = vget_lane_s8(v, 0);
                    ptr1[0] = vget_lane_s8(v, 1);
                    ptr2[0] = vget_lane_s8(v, 2);
                    ptr3[0] = vget_lane_s8(v, 3);

                    intptr += 4;
                    ptr0 += 1;
                    ptr1 += 1;
                    ptr2 += 1;
                    ptr3 += 1;
                }
            }
        }
    }
}
|
pt_to_pt_multiPingpong.c | /*****************************************************************************
* *
* Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 *
* *
* produced by *
* *
* Mark Bull, Jim Enright and Fiona Reid *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk *
* *
* *
* Copyright 2012, The University of Edinburgh *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
/*-----------------------------------------------------------*/
/* Contains the point-to-point multi-pingpong mixed mode */
/* OpenMP/MPI benchmarks. */
/* This includes: -masteronly multiPingpong */
/* -funnelled multiPingpong */
/* -multiple multiPingpong */
/*-----------------------------------------------------------*/
#include "pt_to_pt_multiPingpong.h"
/*-----------------------------------------------------------*/
/* multiPingPong */
/* */
/* Driver subroutine for the multi-pingpong benchmark. */
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
/* multiPingPong                                             */
/*                                                           */
/* Driver subroutine for the multi-pingpong benchmark.       */
/*                                                           */
/* benchmarkType selects the hybrid variant: MASTERONLY,     */
/* FUNNELLED or MULTIPLE.                                    */
/* Returns 0 on success, 1 if the number of MPI processes on */
/* the ping and pong nodes is unbalanced.                    */
/*                                                           */
/* Relies on benchmark-suite globals (pingNode, pongNode,    */
/* myMPIRank, comm, repsToDo, dataSizeIter bounds, buffers   */
/* etc.) declared in the suite's shared headers.             */
/*-----------------------------------------------------------*/
int multiPingPong(int benchmarkType){
    int dataSizeIter;
    int pongWorldRank;
    char pongProcName[MPI_MAX_PROCESSOR_NAME];
    int balance;

    /* The benchmark always pairs cross-communicator ranks 0 and 1. */
    pingNode = 0;
    pongNode = 1;

    /* Check if there's a balance in num of MPI processes
       on pingNode and pongNode. */
    balance = crossCommBalance(pingNode, pongNode);
    /* If not balanced.. */
    if (balance == FALSE){
        /* ..master prints error */
        if (myMPIRank == 0){
            printBalanceError();
        }
        /* ..and all process exit function. */
        return 1;
    }

    /* Exchange MPI_COMM_WORLD ranks for processes in same crossComm */
    exchangeWorldRanks(pingNode, pongNode, &pongWorldRank);

    /* Processes on pongNode send processor name to pingNode procs. */
    sendProcName(pingNode, pongNode, pongProcName);

    /* Print comm world ranks & processor name of processes
     * taking part in multi-pingpong benchmark.
     */
    printMultiProcInfo(pingNode, pongWorldRank, pongProcName);

    /* Barrier to ensure that all procs have completed
     * printMultiProcInfo before printing column headings.
     */
    MPI_Barrier(comm);

    /* Master process then prints report column headings */
    if (myMPIRank == 0){
        printBenchHeader();
    }

    /* Initialise repsToDo to defaultReps at start of benchmark */
    repsToDo = defaultReps;

    dataSizeIter = minDataSize; /* initialise dataSizeIter to minDataSize */

    /* Loop over data sizes, doubling each iteration (see loop footer). */
    while (dataSizeIter <= maxDataSize){
        /* set sizeofBuffer: one dataSizeIter-sized chunk per thread */
        sizeofBuffer = dataSizeIter * numThreads;

        /* Allocate space for the main data arrays */
        allocateMultiPingpongData(sizeofBuffer);

        /* warm-up: one untimed sweep of the selected variant */
        if (benchmarkType == MASTERONLY){
            /* Masteronly warm-up */
            masteronlyMultiPingpong(warmUpIters, dataSizeIter);
        }
        else if (benchmarkType == FUNNELLED){
            /* Funnelled warm-up sweep */
            funnelledMultiPingpong(warmUpIters, dataSizeIter);
        }
        else if (benchmarkType == MULTIPLE){
            /* Multiple pingpong warm-up */
            multipleMultiPingpong(warmUpIters, dataSizeIter);
        }

        /* Verification test for multi-pingpong */
        testMultiPingpong(sizeofBuffer, dataSizeIter);

        /* Initialise benchmark */
        benchComplete = FALSE;

        /* Keep executing benchmark until target time is reached */
        while (benchComplete != TRUE){
            /* MPI_Barrier to synchronise processes.
               Then start the timer. */
            MPI_Barrier(comm);
            startTime = MPI_Wtime();

            if (benchmarkType == MASTERONLY){
                /* Execute masteronly multipingpong repsToDo times */
                masteronlyMultiPingpong(repsToDo, dataSizeIter);
            }
            else if (benchmarkType == FUNNELLED){
                /* Execute funnelled multipingpong */
                funnelledMultiPingpong(repsToDo, dataSizeIter);
            }
            else if (benchmarkType == MULTIPLE){
                multipleMultiPingpong(repsToDo, dataSizeIter);
            }

            /* Stop the timer..MPI_Barrier to synchronise processes
             * for more accurate timing.
             */
            MPI_Barrier(comm);
            finishTime = MPI_Wtime();
            totalTime = finishTime - startTime;

            /* Call repTimeCheck to check if target time is reached.
               Only rank 0 decides; its verdict (and possibly updated
               repsToDo) is broadcast so all ranks agree. */
            if (myMPIRank==0){
                benchComplete = repTimeCheck(totalTime, repsToDo);
            }
            /* Ensure all procs have the same value of benchComplete */
            /* and repsToDo */
            MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
            MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
        } /* End of loop to check if benchComplete is true */

        /* Master process sets benchmark results */
        if (myMPIRank == 0){
            setReportParams(dataSizeIter, repsToDo, totalTime);
            printReport();
        }

        /* Free the allocated space for the main data arrays */
        freeMultiPingpongData();

        /* Update dataSize before next iteration */
        dataSizeIter = dataSizeIter * 2;
    } /* end loop over data sizes */

    return 0;
}
/*-----------------------------------------------------------*/
/* masteronlyMultiPingpong */
/* */
/* All MPI processes in crossComm = pingNode sends a single */
/* fixed length message to the neighbouring process in */
/* crossComm = pongNode. */
/* The neighbouring processes then sends the message back */
/* to the first process. */
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
/* masteronlyMultiPingpong                                   */
/*                                                           */
/* All MPI processes in crossComm = pingNode sends a single  */
/* fixed length message to the neighbouring process in       */
/* crossComm = pongNode.                                     */
/* The neighbouring processes then sends the message back    */
/* to the first process.                                     */
/*                                                           */
/* "Masteronly" variant: all MPI calls happen outside the    */
/* OpenMP parallel regions; threads are used only to fill    */
/* and drain the message buffers.                            */
/*                                                           */
/* totalReps : number of ping-pong repetitions to perform    */
/* dataSize  : per-thread chunk size (also the OpenMP static */
/*             schedule chunk, so each thread handles its    */
/*             own contiguous section of the buffer)         */
/* Returns 0. Uses suite globals: pingSendBuf, pongRecvBuf,  */
/* pingRecvBuf, pongSendBuf, finalRecvBuf, sizeofBuffer,     */
/* crossComm, crossCommRank, status, globalIDarray, and the  */
/* per-thread myThreadID (presumably threadprivate -- it is  */
/* read inside default(none) regions without being listed;   */
/* NOTE(review): confirm in the suite's shared header).      */
/*-----------------------------------------------------------*/
int masteronlyMultiPingpong(int totalReps, int dataSize){
    int repIter, i;

    for (repIter = 1; repIter <= totalReps; repIter++){
        /* Threads under each MPI process with
         * crossCommRank = pingNode write to pingSendBuf
         * array with a PARALLEL FOR directive.
         */
        if (crossCommRank == pingNode){
#pragma omp parallel for default(none) \
    private(i) \
    shared(pingSendBuf,dataSize,sizeofBuffer,globalIDarray) \
    schedule(static,dataSize)
            for (i=0; i<sizeofBuffer; i++){
                pingSendBuf[i] = globalIDarray[myThreadID];
            }

            /* Each process with crossCommRank = pingNode sends
             * buffer to MPI process with rank = pongNode in crossComm.
             */
            MPI_Send(pingSendBuf, sizeofBuffer, MPI_INT, pongNode, TAG, crossComm);

            /* The processes then wait for a message from pong process
             * and each thread reads its part of the received buffer.
             */
            MPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INT, pongNode, \
                TAG, crossComm, &status);

#pragma omp parallel for default(none) \
    private(i) \
    shared(pongRecvBuf,finalRecvBuf,dataSize,sizeofBuffer) \
    schedule(static,dataSize)
            for (i=0; i<sizeofBuffer; i++){
                finalRecvBuf[i] = pongRecvBuf[i];
            }
        }
        else if (crossCommRank == pongNode){
            /* Each process with crossCommRank = pongNode receives
             * the message from the pingNode processes.
             */
            MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, pingNode,\
                TAG, crossComm, &status);

            /* Each thread copies its part of the received buffer
             * to pongSendBuf.
             */
#pragma omp parallel for default(none) \
    private(i) \
    shared(pongSendBuf,pingRecvBuf,dataSize,sizeofBuffer) \
    schedule(static,dataSize)
            for (i=0; i<sizeofBuffer; i++){
                pongSendBuf[i] = pingRecvBuf[i];
            }

            /* The processes now send pongSendBuf to processes
             * with crossCommRank = pingNode.
             */
            MPI_Send(pongSendBuf, sizeofBuffer, MPI_INT, pingNode, \
                TAG, crossComm);
        }
    } /* End repetitions loop */
    return 0;
}
/*-----------------------------------------------------------*/
/* funnelledMultiPingpong */
/* */
/* All MPI processes in crossComm = pingNode sends a single */
/* fixed length message to the neighbouring process in */
/* crossComm = pongNode. */
/* The neighbouring processes then sends the message back */
/* to the first process. */
/* All communication takes place within the OpenMP parallel */
/* region for this benchmark. */
/*-----------------------------------------------------------*/
/*
 * funnelledMultiPingpong
 * The whole repetition loop runs inside a single OpenMP parallel region,
 * but all MPI communication is funnelled through the master thread
 * (omp master); explicit barriers order the buffer accesses around the
 * master-only MPI calls. Returns 0.
 */
int funnelledMultiPingpong(int totalReps, int dataSize){
int repIter, i;
/* Open the parallel region for threads */
#pragma omp parallel \
private(i,repIter) \
shared(pingNode,pongNode,pingSendBuf,pingRecvBuf) \
shared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \
shared(dataSize,globalIDarray,crossComm,status) \
shared(totalReps,myMPIRank,crossCommRank)
{
/* loop totalRep times */
for (repIter = 1; repIter <= totalReps; repIter++){
/* All threads under each MPI process with
* crossCommRank = pingNode write to pingSendBuf
* array using a parallel for directive.
*/
if (crossCommRank == pingNode){
#pragma omp for schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Implicit barrier at end of omp for takes care of synchronisation */
/* Master thread under each pingNode process sends
* buffer to corresponding MPI process in pongNode
* using crossComm.
*/
#pragma omp master
{
MPI_Send(pingSendBuf, sizeofBuffer, MPI_INT, pongNode, TAG, crossComm);
/* Master thread then waits for a message from the pong process. */
MPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INT, pongNode, TAG, \
crossComm, &status);
}
/* Barrier needed to wait for master thread to complete MPI_Recv */
/* (the omp master construct has no implied barrier of its own) */
#pragma omp barrier
/* Each thread then reads its part of the received buffer. */
#pragma omp for schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
finalRecvBuf[i] = pongRecvBuf[i];
}
}
else if (crossCommRank == pongNode){
/* Master thread under each pongNode process receives
* the message from the pingNode processes.
*/
#pragma omp master
{
MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, pingNode,\
TAG, crossComm, &status);
}
/* Barrier needed to wait on master thread */
#pragma omp barrier
/* Each thread reads its part of the received buffer. */
#pragma omp for schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pongSendBuf[i] = pingRecvBuf[i];
}
/* Implicit barrier at end of omp for */
/* Master threads send their pongSendBuf to processes
* with crossCommRank = pingNode.
*/
#pragma omp master
{
MPI_Send(pongSendBuf, sizeofBuffer, MPI_INT, pingNode, TAG, crossComm);
}
}
} /* End of repetitions loop. */
} /* End of parallel region */
return 0;
}
/*-----------------------------------------------------------*/
/* multipleMultiPingpong */
/* */
/* Multiple threads take place in the communication and */
/* computation. */
/* Each thread of all MPI processes in crossComm = pingNode */
/* sends a portion of the message to the neighbouring */
/* process in crossComm = pongNode. */
/* Each thread of the neighbouring processes then sends */
/* the message back to the first process. */
/*-----------------------------------------------------------*/
/*
 * multipleMultiPingpong
 * Every thread takes part in the communication: each thread exchanges its
 * own dataSize-element slice of the buffers with the matching thread on
 * the other side, using myThreadID as the message tag so the slices land
 * in the correct positions. Returns 0.
 */
int multipleMultiPingpong(int totalReps, int dataSize){
int repIter, i;
int lBound;
/* Open parallel region for threads */
#pragma omp parallel \
private(i,repIter,status,lBound) \
shared(pingNode,pongNode,pingSendBuf,pingRecvBuf) \
shared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \
shared(dataSize,globalIDarray,crossComm) \
shared(totalReps,myMPIRank,crossCommRank)
{
for (repIter=1; repIter<=totalReps; repIter++){ /* loop totalRep times */
if (crossCommRank == pingNode){
/* Calculate lower bound of data array for the thread */
/* NOTE(review): this assumes sizeofBuffer == numThreads * dataSize so the
* schedule(static,dataSize) chunk owned by each thread coincides with the
* [lBound, lBound+dataSize) slice it sends - confirm at the call site. */
lBound = (myThreadID * dataSize);
/* All threads write to its part of the pingBuf
* array using a parallel for directive.
*/
#pragma omp for nowait schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Implicit barrier at end of for not needed for multiple */
/* Each thread under ping process sends dataSize items
* to pongNode process in crossComm.
* myThreadID is used as tag to ensure data goes to
* correct place in buffer.
*/
MPI_Send(&pingSendBuf[lBound], dataSize, MPI_INT, pongNode, \
myThreadID, crossComm);
/* Thread then waits for a message from pongNode. */
MPI_Recv(&pongRecvBuf[lBound], dataSize, MPI_INT, pongNode, \
myThreadID, crossComm, &status);
/* Each thread reads its part of the received buffer. */
#pragma omp for nowait schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
finalRecvBuf[i] = pongRecvBuf[i];
}
}
else if (crossCommRank == pongNode){
/* Calculate lower and upper bound of data array */
lBound = (myThreadID * dataSize);
/* Each thread under pongRank receives a message from
* the ping process.
*/
MPI_Recv(&pingRecvBuf[lBound], dataSize, MPI_INT, pingNode, \
myThreadID, crossComm, &status);
/* Each thread now copies its part of the received buffer
* to pongSendBuf.
*/
#pragma omp for nowait schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pongSendBuf[i] = pingRecvBuf[i];
}
/* Each thread now sends pongSendBuf to ping process. */
MPI_Send(&pongSendBuf[lBound], dataSize, MPI_INT, pingNode, \
myThreadID, crossComm);
}
} /* End repetitions loop */
} /* End parallel region */
return 0;
}
/*-----------------------------------------------------------*/
/* allocateMultiPingpongData */
/* */
/* Allocates space for the main data arrays. */
/* Size of each array is specified by subroutine argument. */
/*-----------------------------------------------------------*/
/*
 * allocateMultiPingpongData
 * Allocates the main data arrays for the multi-pingpong benchmark.
 * Ping-side ranks need send/receive/verify buffers; pong-side ranks need
 * their own receive/send pair. Allocation results are now checked
 * (CERT MEM32-C): on failure the rank reports the error and exits, rather
 * than letting a later copy loop dereference NULL.
 * Returns 0.
 */
int allocateMultiPingpongData(int sizeofBuffer){
	if (crossCommRank == pingNode){
		/* Arrays used by the MPI processes with crossCommRank = pingNode. */
		pingSendBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
		pongRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
		finalRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
		if (pingSendBuf == NULL || pongRecvBuf == NULL || finalRecvBuf == NULL){
			fprintf(stderr, "Error: allocation failure (allocateMultiPingpongData)\n");
			/* abnormal exit of one rank lets the MPI launcher tear the job down */
			exit(EXIT_FAILURE);
		}
	}
	else if (crossCommRank == pongNode){
		/* Arrays used by the MPI processes with crossCommRank = pongNode. */
		pingRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
		pongSendBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
		if (pingRecvBuf == NULL || pongSendBuf == NULL){
			fprintf(stderr, "Error: allocation failure (allocateMultiPingpongData)\n");
			exit(EXIT_FAILURE);
		}
	}
	return 0;
}
/*-----------------------------------------------------------*/
/* freeMultiPingpongData */
/* */
/* Deallocates the storage space for the main data arrays. */
/*-----------------------------------------------------------*/
/*
 * freeMultiPingpongData
 * Releases the main data arrays and resets the pointers to NULL so a
 * stale pointer cannot be freed twice or reused: this function is called
 * once per data-size iteration of the benchmark, and the same globals are
 * reallocated each time. Returns 0.
 */
int freeMultiPingpongData(){
	if (crossCommRank == pingNode){
		free(pingSendBuf);
		pingSendBuf = NULL;
		free(pongRecvBuf);
		pongRecvBuf = NULL;
		free(finalRecvBuf);
		finalRecvBuf = NULL;
	}
	else if (crossCommRank == pongNode){
		free(pingRecvBuf);
		pingRecvBuf = NULL;
		free(pongSendBuf);
		pongSendBuf = NULL;
	}
	return 0;
}
/*-----------------------------------------------------------*/
/* testMultiPingpong */
/* */
/* Verifies the the multi pingpong benchmark worked */
/* correctly. */
/*-----------------------------------------------------------*/
/*
 * testMultiPingpong
 * Verifies that the multi-pingpong benchmark worked: every pingNode rank
 * rebuilds the expected buffer contents and compares them with what
 * actually arrived in finalRecvBuf. Per-rank verdicts are AND-reduced to
 * the master, which records the overall outcome. Returns 0.
 *
 * Changes vs. original: the malloc of testBuf is checked (a failure now
 * counts as a test failure instead of dereferencing NULL), and the
 * comparison loop stops at the first mismatch.
 */
int testMultiPingpong(int sizeofBuffer, int dataSize){
	int i;
	int testFlag, localTestFlag;

	/* Assume success until a mismatch (or allocation failure) is found. */
	localTestFlag = TRUE;

	/* Only ranks on the ping side hold finalRecvBuf, so only they verify. */
	if (crossCommRank == pingNode){
		testBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
		if (testBuf == NULL){
			fprintf(stderr, "Error: malloc of testBuf failed (testMultiPingpong)\n");
			localTestFlag = FALSE;
		}
		else {
			/* Build the expected values: the same data the threads
			 * originally wrote into pingSendBuf. */
#pragma omp parallel for default(none) \
	private(i) \
	shared(testBuf,dataSize,sizeofBuffer,globalIDarray)\
	schedule(static,dataSize)
			for (i=0; i<sizeofBuffer; i++){
				testBuf[i] = globalIDarray[myThreadID];
			}
			/* Compare expected and received; stop at first mismatch. */
			for (i=0; i<sizeofBuffer; i++){
				if (testBuf[i] != finalRecvBuf[i]){
					localTestFlag = FALSE;
					break;
				}
			}
			free(testBuf);
		}
	}

	/* AND-reduce the per-rank verdicts to the master (pong ranks keep
	 * localTestFlag = TRUE, so they do not affect the result). */
	MPI_Reduce(&localTestFlag, &testFlag, 1, MPI_INT, MPI_LAND, 0, comm);
	/* Master then records the outcome. */
	if (myMPIRank == 0){
		setTestOutcome(testFlag);
	}
	return 0;
}
|
GB_unaryop__lnot_uint32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint32_uint64
// op(A') function: GB_tran__lnot_uint32_uint64
// C type: uint32_t
// A type: uint64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Computes Cx [p] = !(((uint32_t) Ax [p]) != 0) for all anz entries,
// via the GB_CAST_OP macro defined above (cast first, then the operator).
// Note: this file is auto-generated (Generator/*); do not hand-edit logic.
GrB_Info GB_unop__lnot_uint32_uint64
(
uint32_t *Cx, // Cx and Ax may be aliased
uint64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator was disabled at compile time via GB_control.h
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// elementwise and in-place safe, so aliasing Cx == Ax is fine
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The transpose kernel body lives in GB_unaryop_transpose.c and is driven
// by the GB_* macros defined at the top of this file.
GrB_Info GB_tran__lnot_uint32_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// operator was disabled at compile time via GB_control.h
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB050-functionparameter-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Arrays passed as function parameters
*/
/* Writes o1[k] = 0.5 * c[k] for every element, offloading the loop with
 * OpenMP target directives. Arrays are passed as function parameters and
 * len gives their length (DataRaceBench DRB050: no data race). */
void foo1(double o1[], double c[], int len)
{
#pragma omp target data map(to:c[0:len]) map(from:o1[0:len])
#pragma omp target parallel for
  for (int k = 0; k < len; ++k) {
    o1[k] = 0.5 * c[k];
  }
}
double o1[100];
double c[100];
/* Driver: initialises c and o1 on the device, then calls foo1 to scale c
 * into o1, and prints the result. Part of DataRaceBench DRB050 (a "no
 * data race" reference case), so the code is intentionally left as-is. */
int main()
{
int i;
int len = 100;
/* NOTE(review): both arrays are only written (not read) inside the target
 * region, matching the map(from:...) clauses. */
#pragma omp target data map(from:c[0:len], o1[0:len])
#pragma omp target parallel for
for (i = 0; i < len; ++i) {
c[i] = i + 1.01;
o1[i] = i + 1.01;
}
foo1 (o1, c, 100);
for (i = 0; i < len; ++i) {
printf("%lf\n",o1[i]);
}
return 0;
}
|
optest.c | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
/*
 * Timing micro-benchmark: multiplies s by every loop index in parallel
 * and reports the elapsed CPU time.
 *
 * Fixes vs. original:
 *  - missing ';' after the final printf (did not compile);
 *  - s was read uninitialized (undefined behavior) - now starts at 1;
 *  - s was updated by all threads without synchronisation (data race) -
 *    now combined with reduction(*:s);
 *  - removed commented-out debug code.
 * Note: the product includes i == 0, so s ends up 0; the program's
 * observable purpose is the timing line, which is preserved.
 */
int main(int argc, char* argv[])
{
    int s = 1;
    clock_t start, end;

    start = clock();
#pragma omp parallel for reduction(*:s)
    for (int i = 0; i < 100000000; i++)
    {
        s = s * i;
    }
    end = clock();

    printf("%f\n", (double)(end - start) / CLOCKS_PER_SEC);
    printf("%d\n", s);
    return 0;
}
|
macro-3.c | /* PR preprocessor/27746 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-omplower" } */
/* LLVM LOCAL test not applicable */
/* { dg-require-fdump "" } */
#define omp FOO
#define p parallel
#define omp_parallel _Pragma ("omp parallel")
#define omp_p _Pragma ("omp p")
void bar (void);
/* Each of the four forms below must produce an "omp parallel" region
 * (see the scan-tree-dump-times directive at the end of the file).
 * The code must stay byte-identical: it is a GCC testcase for macro
 * expansion inside #pragma/_Pragma (PR preprocessor/27746). */
void
foo (void)
{
/* plain directive */
#pragma omp parallel
bar ();
/* 'p' is macro-expanded to 'parallel' inside the pragma line */
#pragma omp p
bar ();
/* _Pragma operator form */
omp_parallel
bar ();
/* _Pragma form whose body itself requires macro expansion */
omp_p
bar ();
}
/* { dg-final { scan-tree-dump-times "#pragma omp parallel" 4 "omplower" } } */
|
QED_AEG.h | #pragma once
#include "Constants.h"
#include "Ensemble.h"
#include "Grid.h"
#include "AnalyticalField.h"
#include "Pusher.h"
#include "synchrotron.h"
#include <omp.h>
#include <random>
using namespace constants;
namespace pfc
{
// Monte-Carlo QED event generator + pusher for the "AEG" (adaptive event
// generator) scheme: photons can decay into electron/positron pairs and
// charged particles can emit photons. Per-repetition probabilities are
// estimated; cheap single events are handled directly, while strong-field
// cases are sub-cycled in RunAvalanche. Per-thread scratch vectors
// (Avalanche*/afterAvalanche*) keep the OpenMP loops race-free.
template <class TGrid> // may be AnalyticalField or any Grid type
class ScalarQED_AEG_only_electron : public ParticlePusher
{
public:
// Sets physical constants and probability thresholds, and sizes the
// per-thread scratch containers to the maximum OpenMP thread count.
ScalarQED_AEG_only_electron()
{
MinProbability = 5e-4;
MaxProbability = 0.01;
SchwingerField = sqr(Constants<FP>::electronMass() * Constants<FP>::lightVelocity())
* Constants<FP>::lightVelocity() / (-Constants<FP>::electronCharge() * Constants<FP>::planck());
preFactor = sqr(Constants<FP>::electronCharge()) * Constants<FP>::electronMass()
* Constants<FP>::lightVelocity() / sqr(Constants<FP>::planck());
coeffPhoton_probability = 1.0;
coeffPair_probability = 0.0;
distribution = std::uniform_real_distribution<FP>(0.0, 1.0);
int max_threads;
#ifdef __USE_OMP__
max_threads = omp_get_max_threads();
#else
max_threads = 1;
#endif
AvalanchePhotons.resize(max_threads);
AvalancheParticles.resize(max_threads);
afterAvalanchePhotons.resize(max_threads);
afterAvalancheParticles.resize(max_threads);
}
// Top-level entry: clears per-thread scratch, processes each species
// (guarded by the corresponding probability coefficient), then appends
// all newly created particles/photons to the ensemble serially.
void processParticles(Ensemble3d* particles, TGrid* grid, FP timeStep)
{
int max_threads;
#ifdef __USE_OMP__
max_threads = omp_get_max_threads();
#else
max_threads = 1;
#endif
for (int th = 0; th < max_threads; th++)
{
AvalanchePhotons[th].clear();
AvalancheParticles[th].clear();
afterAvalanchePhotons[th].clear();
afterAvalancheParticles[th].clear();
}
if ((*particles)[Photon].size() && coeffPair_probability != 0)
HandlePhotons((*particles)[Photon], grid, timeStep);
if ((*particles)[Electron].size() && coeffPhoton_probability != 0)
HandleParticles((*particles)[Electron], grid, timeStep);
if ((*particles)[Positron].size() && coeffPhoton_probability != 0)
HandleParticles((*particles)[Positron], grid, timeStep);
for (int th = 0; th < max_threads; th++)
{
for (int ind = 0; ind < afterAvalanchePhotons[th].size(); ind++)
{
particles->addParticle(afterAvalanchePhotons[th][ind]);
}
for (int ind = 0; ind < afterAvalancheParticles[th].size(); ind++)
{
particles->addParticle(afterAvalancheParticles[th][ind]);
}
}
}
// Standard Boris push: half electric kick, magnetic rotation, half kick,
// then position update. Overload for a concrete particle.
void Boris(Particle3d&& particle, const FP3& e, const FP3& b, FP timeStep)
{
FP eCoeff = timeStep * particle.getCharge() / (2 * particle.getMass() * Constants<FP>::lightVelocity());
FP3 eMomentum = e * eCoeff;
FP3 um = particle.getP() + eMomentum;
FP3 t = b * eCoeff / sqrt((FP)1 + um.norm2());
FP3 uprime = um + cross(um, t);
FP3 s = t * (FP)2 / ((FP)1 + t.norm2());
particle.setP(eMomentum + um + cross(uprime, s));
particle.setPosition(particle.getPosition() + timeStep * particle.getVelocity());
}
// Same Boris push, overload for a particle proxy (identical body).
void Boris(ParticleProxy3d&& particle, const FP3& e, const FP3& b, FP timeStep)
{
FP eCoeff = timeStep * particle.getCharge() / (2 * particle.getMass() * Constants<FP>::lightVelocity());
FP3 eMomentum = e * eCoeff;
FP3 um = particle.getP() + eMomentum;
FP3 t = b * eCoeff / sqrt((FP)1 + um.norm2());
FP3 uprime = um + cross(um, t);
FP3 s = t * (FP)2 / ((FP)1 + t.norm2());
particle.setP(eMomentum + um + cross(uprime, s));
particle.setPosition(particle.getPosition() + timeStep * particle.getVelocity());
}
// Advances photons and samples pair production. Low-probability photons
// may be skipped (with a compensating Factor); high-probability photons
// are handed to RunAvalanche with sub-stepping.
void HandlePhotons(ParticleArray3d& particles, TGrid* grid, FP timeStep)
{
FP dt = timeStep;
#pragma omp parallel for schedule(dynamic, 1)
for (int i = 0; i < particles.size(); i++)
{
int thread_id;
#ifdef __USE_OMP__
thread_id = omp_get_thread_num();
#else
thread_id = 0;
#endif
FP3 pPos = particles[i].getPosition();
FP3 k = particles[i].getVelocity();
FP3 e, b;
e = grid->getE(pPos);
b = grid->getB(pPos);
k = (1 / k.norm()) * k; // normalized wave vector
particles[i].setPosition(pPos + dt * Constants<FP>::lightVelocity() * k);
FP H_eff = sqrt(sqr(e + VP(k, b)) - sqr(SP(e, k)));
FP HE = H_eff / SchwingerField;
FP pGamma = particles[i].getMomentum().norm() / (Constants<FP>::electronMass() * Constants<FP>::lightVelocity());
FP EstimatedProbability = dt * estimatedPhotons(HE, pGamma);
FP Factor = 1;
if (EstimatedProbability < MinProbability)
{
// rare event: reject most photons, boost the survivors' weight
FP r0 = random_number_omp();
if (r0 > EstimatedProbability / MinProbability)
continue;
else
Factor = MinProbability / EstimatedProbability;
}
if (EstimatedProbability < MaxProbability)
{
//=======handle single event========
double gamma = pGamma;
double chi = gamma * H_eff / SchwingerField;
double delta = Pair_Generator(Factor, chi, gamma, dt);
if (delta != 0)
{
// split the photon momentum delta/(1-delta) between e-/e+
Particle3d NewParticle;
NewParticle.setType(Electron);
NewParticle.setWeight(particles[i].getWeight());
NewParticle.setPosition(particles[i].getPosition());
NewParticle.setMomentum(delta * particles[i].getMomentum());
afterAvalancheParticles[thread_id].push_back(NewParticle);
NewParticle.setType(Positron);
NewParticle.setMomentum((1 - delta) * particles[i].getMomentum());
afterAvalancheParticles[thread_id].push_back(NewParticle);
//deletePhoton
}
}
else {
//=======handle avalanche========
AvalancheParticles[thread_id].clear();
AvalanchePhotons[thread_id].clear();
AvalanchePhotons[thread_id].push_back(particles[i]);
particles[i].setPosition(particles[i].getPosition() - dt * Constants<FP>::lightVelocity() * k); // go back
RunAvalanche(H_eff, e, b, Photon, pGamma, dt);
//deletePhoton
for (int k = 0; k != AvalanchePhotons[thread_id].size(); k++)
afterAvalanchePhotons[thread_id].push_back(AvalanchePhotons[thread_id][k]);
for (int k = 0; k != AvalancheParticles[thread_id].size(); k++)
afterAvalancheParticles[thread_id].push_back(AvalancheParticles[thread_id][k]);
}
}
}
// Advances electrons/positrons (Boris push) and samples photon emission;
// same Min/Max probability strategy as HandlePhotons.
void HandleParticles(ParticleArray3d& particles, TGrid* grid, FP timeStep)
{
FP dt = timeStep;
#pragma omp parallel for schedule(dynamic, 1)
for (int i = 0; i < particles.size(); i++)
{
int thread_id;
#ifdef __USE_OMP__
thread_id = omp_get_thread_num();
#else
thread_id = 0;
#endif
FP3 pPos = particles[i].getPosition();
FP3 v = particles[i].getVelocity();
FP3 e, b;
e = grid->getE(pPos);
b = grid->getB(pPos);
FP H_eff = sqr(e + (1 / Constants<FP>::lightVelocity()) * VP(v, b))
- sqr(SP(e, v) / Constants<FP>::lightVelocity());
if (H_eff < 0)
H_eff = 0; // clamp: numerical noise can drive the square negative
H_eff = sqrt(H_eff);
FP pGamma = particles[i].getGamma();
FP HE = H_eff / SchwingerField;
FP EstimatedProbability = dt * estimatedParticles(HE, pGamma);
FP Factor = 1;
if (EstimatedProbability < MinProbability)
{
FP r0 = random_number_omp();
if (r0 > EstimatedProbability / MinProbability)
{
Boris(particles[i], e, b, dt);
continue;
}
else
Factor = MinProbability / EstimatedProbability;
}
if (EstimatedProbability < MaxProbability)
{
//=======handle single event========
double gamma = pGamma;
double chi = gamma * H_eff / SchwingerField;
double delta = Photon_MGenerator(Factor, chi, gamma, dt);
if (delta != 0)
{
// emit a photon carrying fraction delta of the momentum
Particle3d NewParticle;
NewParticle.setType(Photon);
NewParticle.setWeight(particles[i].getWeight());
NewParticle.setPosition(particles[i].getPosition());
NewParticle.setMomentum(delta * particles[i].getMomentum());
afterAvalanchePhotons[thread_id].push_back(NewParticle);
particles[i].setMomentum((1 - delta) * particles[i].getMomentum());
}
Boris(particles[i], e, b, dt);
}
else
{
//=======handle avalanche========
AvalancheParticles[thread_id].clear();
AvalanchePhotons[thread_id].clear();
AvalancheParticles[thread_id].push_back(particles[i]);
RunAvalanche(H_eff, e, b, particles[i].getType(), pGamma, dt);
for (int k = 0; k != AvalanchePhotons[thread_id].size(); k++)
afterAvalanchePhotons[thread_id].push_back(AvalanchePhotons[thread_id][k]);
// slot 0 is the seed particle: write its new state back in place
particles[i].setMomentum(AvalancheParticles[thread_id][0].getMomentum());
particles[i].setPosition(AvalancheParticles[thread_id][0].getPosition());
for (int k = 1; k != AvalancheParticles[thread_id].size(); k++)
afterAvalancheParticles[thread_id].push_back(AvalancheParticles[thread_id][k]);
}
}
}
// Sub-cycles one high-probability seed (particle or photon) through NT
// sub-steps, growing this thread's Avalanche* vectors as new photons and
// pairs are created. Fields E/B are frozen for the whole avalanche.
void RunAvalanche(double H_eff_global, const FP3& E, const FP3& B, int SeedType, double gamma, double dt)
{
int thread_id;
#ifdef __USE_OMP__
thread_id = omp_get_thread_num();
#else
thread_id = 0;
#endif
vector<Particle3d>& AvalancheParticles = this->AvalancheParticles[thread_id];
vector<Particle3d>& AvalanchePhotons = this->AvalanchePhotons[thread_id];
gamma = max(gamma, 1.0);
FP HE = H_eff_global / SchwingerField;
// choose the sub-step so each sub-step probability stays <= MaxProbability
FP sub_dt = MaxProbability / estimatedParticles(HE, gamma);
int NT = 1 + int(dt / sub_dt);
sub_dt = dt / FP(NT);
for (int i = 0; i != NT; i++)
{
for (int k = 0; k != AvalancheParticles.size(); k++)
{
Boris(AvalancheParticles[k], E, B, sub_dt);
FP3 v = AvalancheParticles[k].getVelocity();
FP H_eff = sqr(E + (1 / Constants<FP>::lightVelocity()) * VP(v, B))
- sqr(SP(E, v) / Constants<FP>::lightVelocity());
if (H_eff < 0) H_eff = 0;
H_eff = sqrt(H_eff);
FP gamma = AvalancheParticles[k].getGamma();
FP chi = gamma * H_eff / SchwingerField;
FP delta = Photon_MGenerator(1, chi, gamma, sub_dt);
if (delta != 0)
{
Particle3d NewParticle;
NewParticle.setType(Photon);
NewParticle.setWeight(AvalancheParticles[k].getWeight());
NewParticle.setPosition(AvalancheParticles[k].getPosition());
NewParticle.setMomentum(delta * AvalancheParticles[k].getMomentum());
AvalanchePhotons.push_back(NewParticle);
AvalancheParticles[k].setMomentum((1 - delta) * AvalancheParticles[k].getMomentum());
}
}
for (int k = 0; k < AvalanchePhotons.size(); k++)
{
FP3 k_ = AvalanchePhotons[k].getVelocity();
k_ = (1 / k_.norm()) * k_; // normalized wave vector
AvalanchePhotons[k].setPosition(AvalanchePhotons[k].getPosition()
+ sub_dt * Constants<FP>::lightVelocity() * k_);
FP H_eff = sqrt(sqr(E + VP(k_, B)) - sqr(SP(E, k_)));
FP gamma = AvalanchePhotons[k].getMomentum().norm()
/ (Constants<FP>::electronMass() * Constants<FP>::lightVelocity());
FP chi = gamma * H_eff / SchwingerField;
FP delta = Pair_Generator(1, chi, gamma, sub_dt);
if (delta != 0)
{
Particle3d NewParticle;
NewParticle.setType(Electron);
NewParticle.setWeight(AvalanchePhotons[k].getWeight());
NewParticle.setPosition(AvalanchePhotons[k].getPosition());
NewParticle.setMomentum(delta * AvalanchePhotons[k].getMomentum());
AvalancheParticles.push_back(NewParticle);
NewParticle.setType(Positron);
NewParticle.setMomentum((1 - delta) * AvalanchePhotons[k].getMomentum());
AvalancheParticles.push_back(NewParticle);
// swap-and-pop removal of the converted photon; k-- revisits the slot
AvalanchePhotons[k] = AvalanchePhotons[AvalanchePhotons.size() - 1];
AvalanchePhotons.pop_back();
k--;
}
}
}
}
// Upper-bound estimate of the per-unit-time pair-production rate.
FP estimatedPhotons(FP HE, FP gamma)
{
return (0.0827 * HE) * preFactor;
}
// Upper-bound estimate of the per-unit-time emission rate; piecewise fit
// in b = (3/2) * HE * gamma.
FP estimatedParticles(FP HE, FP gamma)
{
FP b = 3.0 / 2.0 * HE * gamma;
FP newFactor;
if (b < 0.1)
{
newFactor = 0.962436 * b / gamma + 0.0827 * HE;
}
else if (b < 0.5)
{
newFactor = 0.779009 * pow(b, 11.0 / 12.0) / gamma + 0.0827 * HE;
}
else if (b < 10)
{
newFactor = 0.721193 * pow(b, 19.0 / 24.0) / gamma + 0.0827 * HE;
}
else
{
newFactor = 0.955556 * pow(b, 2.0 / 3.0) / gamma + 0.0827 * HE;
}
return newFactor * preFactor;
}
// Differential photon-emission probability at energy fraction d; the
// z < 700 guard avoids overflow in the synchrotron functions.
FP Photon_probability(FP chi, FP gamma, FP d)
{
FP z = (2 / 3.0) * (1 / chi) * d / (1 - d);
FP coeff = (sqrt(3.0) / (2.0 * pi)) * coeffPhoton_probability;
if ((z < 700) && (z > 0))
return coeff * (chi / gamma) * ((1 - d) / d) * (synchrotron_1(z) + (3 / 2.0) * d * chi * z * synchrotron_2(z));
else
return 0;
}
// Differential pair-production probability at energy fraction d.
FP Pair_probability(FP chi, FP gamma, FP d)
{
FP z_p = (2 / 3.0) / (chi * (1 - d) * d);
FP coeff = (sqrt(3.0) / (2.0 * pi)) * coeffPair_probability;
if ((z_p < 700) && (z_p > 0))
return coeff * (chi / gamma) * (d - 1) * d * (synchrotron_1(z_p) - (3 / 2.0) * chi * z_p * synchrotron_2(z_p));
else
return 0;
}
FP Pair_Generator(FP Factor, FP chi, FP gamma, FP dt) //returns photon energy in mc2gamma in case of generation.
{
FP factor = Factor * dt * preFactor;
FP r1 = random_number_omp();
FP r2 = random_number_omp();
// accept-reject sampling of the energy fraction r1
if (r2 < factor * Pair_probability(chi, gamma, r1))
return r1;
else
return 0;
}
FP Photon_MGenerator(FP Factor, FP chi, FP gamma, FP dt) //Modified event generator: returns photon energy in mc2gamma in case of generation, !doesn't change gamma
{
// r1 = r0^3 importance-samples small energy fractions; the 3*r0^2
// Jacobian below compensates in the acceptance test
double r0 = random_number_omp();
double r1 = r0 * r0 * r0;
double r2 = random_number_omp();
double factor = Factor * dt * preFactor;
if (r2 < factor * Photon_probability(chi, gamma, r1) * 3 * r0 * r0)
return r1;
else
return 0;
}
// ParticlePusher interface: intentionally a no-op for the proxy form.
void operator()(ParticleProxy3d* particle, ValueField field, FP timeStep)
{}
void operator()(Particle3d* particle, ValueField field, FP timeStep)
{
ParticleProxy3d particleProxy(*particle);
this->operator()(&particleProxy, field, timeStep);
}
private:
// Draws one uniform [0,1) number.
// NOTE(review): the omp critical serialises every draw across threads (a
// scalability bottleneck), and rand_generator is default-seeded, so runs
// are reproducible but not independent across executions - confirm this
// is intended.
FP random_number_omp()
{
FP rand_n;
#pragma omp critical
rand_n = distribution(rand_generator);
return rand_n;
}
FP MinProbability, MaxProbability;  // per-step event probability window
FP SchwingerField;                  // critical (Schwinger) field strength
FP preFactor;                       // common rate prefactor
FP coeffPhoton_probability, coeffPair_probability; // process on/off scales
std::default_random_engine rand_generator;
std::uniform_real_distribution<FP> distribution;
// per-OpenMP-thread scratch: indices are thread ids
vector<vector<Particle3d>> AvalanchePhotons, AvalancheParticles;
vector<vector<Particle3d>> afterAvalanchePhotons, afterAvalancheParticles;
};
// Convenience aliases for the supported field/grid backends.
typedef ScalarQED_AEG_only_electron<YeeGrid> ScalarQED_AEG_only_electron_Yee;
typedef ScalarQED_AEG_only_electron<PSTDGrid> ScalarQED_AEG_only_electron_PSTD;
typedef ScalarQED_AEG_only_electron<PSATDGrid> ScalarQED_AEG_only_electron_PSATD;
typedef ScalarQED_AEG_only_electron<AnalyticalField> ScalarQED_AEG_only_electron_Analytical;
} |
taskloop.c | #include <omp.h>
/*
 * Demonstrates taskloop with collapse(2) and nogroup inside a taskgroup:
 * the enclosing taskgroup still waits for the generated tasks, because
 * nogroup only suppresses the taskloop's own implicit taskgroup.
 *
 * NOTE(review): compute_update() has no visible declaration here (implicit
 * declaration); data1 is an int updated with a double (truncating); and
 * the unsynchronised updates to data1 from concurrent tasks are a data
 * race. Confirm whether this file is intentionally a race/benchmark
 * example before "fixing" it. Also, void main with these parameters is
 * not a standard signature for main.
 */
void main ( omp_lock_t*lock, int n )
{
int data1 = 10;
int N = 100;
int M = 50;
#pragma omp parallel
{
#pragma omp single
#pragma omp taskgroup
{
compute_update(data1);
/* nogroup: no implicit taskgroup for the taskloop itself */
#pragma omp taskloop collapse(2) nogroup
for (int i=0; i<N; i++)
for (int j=0; j<M; j++)
data1 = data1 + 1.3;
}
}
}
|
jump.c | #include "MPKMC.h"
/*
 * KMCUniteIndexes: writes the union of ids0 and ids1 (each ncluster
 * entries) into ids2 and returns the number of distinct ids. ids2 must
 * hold at least 2*ncluster entries. Order is preserved: all of ids0
 * first, then each ids1 entry not already present in ids0.
 *
 * Cleanup vs. original: removed the dead initial `count = ncluster;`
 * store and the fragile `count = i;` that relied on the loop variable's
 * final value.
 */
int KMCUniteIndexes(int ncluster, int ids0[], int ids1[], int ids2[])
{
	int i, j;
	int count;

	/* ids2 starts as a copy of ids0 */
	for (i = 0; i < ncluster; i++) {
		ids2[i] = ids0[i];
	}
	count = ncluster;
	/* append each ids1 entry that does not occur in ids0 */
	for (j = 0; j < ncluster; j++) {
		for (i = 0; i < ncluster; i++) {
			if (ids1[j] == ids0[i]) break;
		}
		if (i == ncluster) {
			ids2[count++] = ids1[j];
		}
	}
	return count;
}
/* Exchanges the site types of grid cells id0 and id1. */
void KMCSwapType(MP_KMCData *data, int id0, int id1)
{
	int tmp;

	tmp = data->grid[id1].type;
	data->grid[id1].type = data->grid[id0].type;
	data->grid[id0].type = tmp;
}
/* Sums the stored grid energies of the nncluster sites listed in ids. */
static double GridClusterEnergy(MP_KMCData *data, int nncluster, int ids[])
{
	double total = 0.0;
	int k;

	for (k = 0; k < nncluster; k++) {
		total += data->grid[ids[k]].energy;
	}
	return total;
}
/* Returns TRUE when types1 matches types0 under some rotation of the
 * cluster (center element must match exactly; the remaining ncluster-1
 * elements are compared through the rotation id table). */
static int CompareTypes(MP_KMCData *data, short types0[], short types1[])
{
	int rot, m, matched, base;

	if (types0[0] != types1[0]) return FALSE;
	for (rot = 0; rot < data->nrot; rot++) {
		base = rot * data->ncluster;
		matched = 0;
		for (m = 1; m < data->ncluster; m++) {
			if (types0[m] == types1[data->rotid[base + m]]) matched++;
		}
		if (matched == data->ncluster - 1) return TRUE;
	}
	return FALSE;
}
/*
 * CalcClusterEnergies: evaluates the cluster energy of each listed site.
 * With table_use enabled, cached energies are looked up first (loop may
 * run in parallel under OpenMP) and newly computed clusters are appended
 * to the table in a serial post-pass; without it, every energy is
 * computed directly via func. *table_update is set TRUE when the table
 * grows.
 */
static void CalcClusterEnergies(MP_KMCData *data, double(*func)(MP_KMCData *, short *),
int ncluster, int ids[], double energy[], int *table_update)
{
int j, k;
int ttid;
/* NOTE(review): tid/types are sized MP_KMC_NCLUSTER_MAX*2 while the loops
 * run to ncluster - presumably callers may pass up to twice NCLUSTER_MAX
 * ids (united clusters); confirm the bound at the call sites. */
int tid[MP_KMC_NCLUSTER_MAX * 2];
short types[MP_KMC_NCLUSTER_MAX * 2][MP_KMC_NCLUSTER_MAX];
if (data->table_use) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j = 0; j < ncluster; j++) {
if (data->grid[ids[j]].type > 0) {
MP_KMCClusterTypes(data, ids[j], types[j]);
/* tid usage below: >= 0 indexes the table, -1 means not found,
 * -99 marks a vacancy (type == 0) */
tid[j] = MP_KMCSearchCluster(data, types[j]);
if (tid[j] >= 0) {
energy[j] = data->table[tid[j]].energy;
}
else {
if (func != NULL) energy[j] = (func)(data, types[j]);
else energy[j] = 0.0;
}
}
else if (data->grid[ids[j]].type == 0) {
tid[j] = -99;
energy[j] = 0.0;
}
}
/* serial post-pass: the table mutation must not happen inside the
 * parallel loop above */
for (j = 0; j < ncluster; j++) {
if (tid[j] >= 0) {
data->table[tid[j]].refcount += 1;
}
else if (tid[j] == -1) {
ttid = MP_KMCAddCluster(data, types[j], energy[j], 0);
/* mark later (rotation-equivalent) duplicates as already added */
for (k = j + 1; k < ncluster; k++) {
if (tid[k] == -1 && CompareTypes(data, types[j], types[k])) {
tid[k] = ttid;
}
}
*table_update = TRUE;
}
}
}
else {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j = 0; j < ncluster; j++) {
if (data->grid[ids[j]].type > 0) {
MP_KMCClusterTypes(data, ids[j], types[j]);
if (func != NULL) energy[j] = (func)(data, types[j]);
else energy[j] = 0.0;
}
else if (data->grid[ids[j]].type == 0) {
energy[j] = 0.0;
}
}
}
}
/* Computes the per-site energies for the listed clusters (via
 * CalcClusterEnergies) and returns their sum. */
static double ClusterEnergy(MP_KMCData *data, double(*func)(MP_KMCData *, short *),
	int ncluster, int ids[], double energy[], int *table_update)
{
	double total = 0.0;
	int k;

	CalcClusterEnergies(data, func, ncluster, ids, energy, table_update);
	for (k = 0; k < ncluster; k++) {
		total += energy[k];
	}
	return total;
}
/*
 * KMCAddEvent: appends one jump event to the event log, growing the array
 * by nevent_step entries when full. Returns the new event id, or
 * MP_KMC_MEM_ERR on allocation failure.
 *
 * Fix vs. original: realloc is done through a temporary pointer, so a
 * failed reallocation no longer overwrites data->event with NULL (which
 * both leaked and lost the existing event log).
 */
int KMCAddEvent(MP_KMCData *data, int dp, int dpp, int id0, int id1, double de, int dmcs)
{
	int eid;

	if (data->nevent >= data->nevent_max) {
		int nevent_max = data->nevent_max + data->nevent_step;
		MP_KMCEventItem *ev = (MP_KMCEventItem *)realloc(data->event, nevent_max * sizeof(MP_KMCEventItem));
		if (ev == NULL) {
			fprintf(stderr, "Error : allocation failure (KMCAddEvent)\n");
			return MP_KMC_MEM_ERR; /* existing events remain valid */
		}
		data->event = ev;
		data->nevent_max = nevent_max;
	}
	eid = data->nevent;
	data->event[eid].dp = dp;
	data->event[eid].dpp = dpp;
	data->event[eid].id0 = id0;
	data->event[eid].id1 = id1;
	data->event[eid].de = de;
	data->event[eid].dmcs = dmcs;
	data->nevent++;
	/* event_pt tracks the current replay position; keep it at the end */
	data->event_pt = data->nevent;
	return eid;
}
/*
 * KMCAddHistory: appends one record of per-run statistics to the history
 * array, growing it by nhistory_step entries when full. Returns the new
 * history id, or MP_KMC_MEM_ERR on allocation failure.
 *
 * Fix vs. original: realloc is done through a temporary pointer, so a
 * failed reallocation no longer overwrites data->history with NULL (which
 * both leaked and lost the existing records).
 */
int KMCAddHistory(MP_KMCData *data, long totmcs, double temp, int ntry, int njump, int table_update, int ntable, double tote, double time)
{
	int hid;

	if (data->nhistory >= data->nhistory_max) {
		int nhistory_max = data->nhistory_max + data->nhistory_step;
		MP_KMCHistoryItem *hist = (MP_KMCHistoryItem *)realloc(data->history, nhistory_max * sizeof(MP_KMCHistoryItem));
		if (hist == NULL) {
			fprintf(stderr, "Error : allocation failure (KMCAddHistory)\n");
			return MP_KMC_MEM_ERR; /* existing records remain valid */
		}
		data->history = hist;
		data->nhistory_max = nhistory_max;
	}
	hid = data->nhistory;
	data->history[hid].totmcs = totmcs;
	data->history[hid].temp = temp;
	data->history[hid].ntry = ntry;
	data->history[hid].njump = njump;
	data->history[hid].table_update = table_update;
	data->history[hid].ntable = ntable;
	data->history[hid].tote = tote;
	data->history[hid].time = time;
	data->nhistory++;
	return hid;
}
/*
 * Compute and store the cluster energy of every grid site, accumulating
 * each site's energy into data->tote.  Sites are processed in batches of
 * MP_KMC_NCLUSTER_MAX so CalcClusterEnergies can evaluate a batch at once.
 * Returns TRUE if the energy table was extended during evaluation,
 * FALSE otherwise.
 */
int MP_KMCGridEnergy(MP_KMCData *data, double(*func)(MP_KMCData *, short *))
{
    int j;
    int id;
    int ncluster = 0;
    int ids[MP_KMC_NCLUSTER_MAX];
    double energy[MP_KMC_NCLUSTER_MAX];
    int table_update = FALSE;

    /* Iterate the site count directly.  This also fixes the original
     * while(TRUE) loop, which pushed id 0 and evaluated a bogus batch
     * even when data->ntot <= 0 (out-of-bounds grid access). */
    for (id = 0; id < data->ntot; id++) {
        ids[ncluster++] = id;
        /* Flush when the batch is full or this is the last site. */
        if (ncluster >= MP_KMC_NCLUSTER_MAX || id == data->ntot - 1) {
            CalcClusterEnergies(data, func, ncluster, ids, energy, &table_update);
            for (j = 0; j < ncluster; j++) {
                data->grid[ids[j]].energy = energy[j];
                data->tote += energy[j];
            }
            ncluster = 0;
        }
    }
    return table_update;
}
/*
 * Attempt up to ntry Metropolis jump trials at temperature temp.
 * Each trial picks a random solute and a random cluster neighbor; if the
 * two sites differ in type, the swap is accepted with the Metropolis
 * criterion based on the local cluster-energy change (evaluated by func).
 * Appends and returns a history record; returns a zeroed record on error.
 */
MP_KMCHistoryItem MP_KMCJump(MP_KMCData *data, int ntry, double temp, double(*func)(MP_KMCData *, short *))
{
    int j, c;
    int dp, jp, dpp;                     /* solute index, jump direction, partner solute index */
    int id0, id1;                        /* grid ids of the swapped pair */
    int ids0[MP_KMC_NCLUSTER_MAX];
    int ids1[MP_KMC_NCLUSTER_MAX];
    int ids2[MP_KMC_NCLUSTER_MAX * 2];   /* union of both clusters */
    int nncluster;
    double kt;                           /* thermal energy kB*T */
    double energy[MP_KMC_NCLUSTER_MAX * 2];
    double cle0, cle1, clde;             /* cluster energy before/after and delta */
    int ntried = 0;
    int njump = 0;                       /* accepted moves */
    int table_update = FALSE;
    int dmcs;
    int hid;
    MP_KMCHistoryItem err = { 0, 0.0, 0, 0, 0, 0, 0.0, 0.0 };  /* zero record returned on error */
    clock_t start = clock();

    /* Refuse to run unless the event replay pointer sits at the tail. */
    if (data->event_pt != data->nevent) {
        fprintf(stderr, "Error : invalid current event point (MP_KMCJump)\n");
        return err;
    }
    /* At least one solute must be allowed to jump. */
    for (j = 0, c = 0; j < data->nsolute; j++) {
        if (data->solute[j].jump) c++;
    }
    if (c < 1) {
        fprintf(stderr, "Error : no solute that can jump (MP_KMCJump)\n");
        return err;
    }
    kt = data->kb*temp;
    dmcs = data->totmcs - data->mcs;  /* MC steps since the last accepted event */
    while (ntried < ntry) {
        /* Random solute index and random non-center neighbor slot (jp >= 1). */
        dp = (int)(MP_Rand(&(data->rand_seed)) * data->dpmax);
        jp = (int)(MP_Rand(&(data->rand_seed)) * (data->jpmax - 1)) + 1;
        if (dp < data->dpmax && jp < data->jpmax) {
            data->totmcs++, dmcs++;
            id0 = data->solute[dp].id;
            MP_KMCClusterIndexes(data, id0, ids0);
            id1 = ids0[jp];
            /* Swapping identical types changes nothing; skip without counting a trial. */
            if (data->grid[id0].type != data->grid[id1].type) {
                MP_KMCClusterIndexes(data, id1, ids1);
                nncluster = KMCUniteIndexes(data->ncluster, ids0, ids1, ids2);
                cle0 = GridClusterEnergy(data, nncluster, ids2);
                KMCSwapType(data, id0, id1);  /* trial move */
                cle1 = ClusterEnergy(data, func, nncluster, ids2, energy, &table_update);
                clde = cle1 - cle0;
                /* Metropolis: always accept downhill, uphill with prob exp(-dE/kT). */
                if (clde < 0.0 || MP_Rand(&(data->rand_seed)) < exp(-clde / kt)) {
                    for (j = 0; j < nncluster; j++) {
                        data->grid[ids2[j]].energy = energy[j];
                    }
                    /* If another solute occupies id1, it migrates to id0 (swap bookkeeping). */
                    for (j = 0; j < data->nsolute; j++) {
                        if (j != dp && data->solute[j].id == id1) break;
                    }
                    if (j < data->nsolute) {
                        data->solute[j].id = id0;
                        data->solute[j].njump++;
                        dpp = j;
                    }
                    else dpp = -1;  /* partner site held no solute */
                    data->solute[dp].id = id1;
                    data->solute[dp].njump++;
                    data->tote += clde;
                    if (data->event_record) {
                        if (KMCAddEvent(data, dp, dpp, id0, id1, clde, dmcs) == MP_KMC_MEM_ERR) return err;
                        dmcs = 0;  /* steps are accounted for in the recorded event */
                    }
                    data->mcs = data->totmcs;
                    njump++;
                }
                else {
                    /* Rejected: undo the trial swap. */
                    KMCSwapType(data, id0, id1);
                }
            }
            ntried++;
        }
    }
    hid = KMCAddHistory(data, data->totmcs, temp, ntry, njump, table_update, data->ntable, data->tote,
        (double)(clock() - start) / CLOCKS_PER_SEC);
    if (hid == MP_KMC_MEM_ERR) return err;
    else return data->history[hid];
}
|
CacheEfficientHogwildTrainer.h | /*
* Copyright 2016 [See AUTHORS file for list of authors]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _CACHE_EFFICIENT_HOGWILD_TRAINER_
#define _CACHE_EFFICIENT_HOGWILD_TRAINER_
#include <map>
#include "../Partitioner/DFSCachePartitioner.h"
#include "../Partitioner/GreedyCachePartitioner.h"
DEFINE_bool(dfs_cache_partitioner, false, "For cache efficient hogwild trainer, use the DFS method to cache partition data points.");
DEFINE_bool(greedy_cache_partitioner, false, "For cache efficient hogwild trainer, use an n^2 greedy algorithm to generate cache friendyl data point ordering.");
// Hogwild trainer that first reorders/partitions datapoints for cache
// locality (DFS or greedy strategy, chosen by command-line flag), then
// runs lock-free parallel epochs over the per-thread batches.
class CacheEfficientHogwildTrainer : public Trainer {
 protected:
    // Print aggregate statistics about how datapoints access model
    // coordinates (totals, distinct coordinates, avg/max/min per point).
    void PrintStatsAboutProblem(const std::vector<Datapoint *> &datapoints) {
	int n_total_coordinate_accesses = 0;
	int n_distinct_model_accesses = 0;
	double avg_num_coordinates_accessed_per_datapoint = 0;
	int max_coordinates_accessed_per_datapoint = 0;
	int min_coordinates_accessed_per_datapoint = INT_MAX;
	std::map<int, bool> coordinates_set;
	for (int i = 0; i < datapoints.size(); i++) {
	    n_total_coordinate_accesses += datapoints[i]->GetCoordinates().size();
	    for (const auto & coordinate : datapoints[i]->GetCoordinates()) {
		coordinates_set[coordinate] = 1;
	    }
	    max_coordinates_accessed_per_datapoint = fmax(max_coordinates_accessed_per_datapoint, datapoints[i]->GetCoordinates().size());
	    // BUG FIX: the minimum must fold over the running minimum.
	    // The original took fmin of the running *maximum*, so the
	    // reported minimum actually tracked the maximum.
	    min_coordinates_accessed_per_datapoint = fmin(min_coordinates_accessed_per_datapoint, datapoints[i]->GetCoordinates().size());
	}
	n_distinct_model_accesses = coordinates_set.size();
	avg_num_coordinates_accessed_per_datapoint = n_total_coordinate_accesses / (double)datapoints.size();
	printf("n_datapoints=%d\n"
	       "n_total_coordinate_accesses=%d\n"
	       "n_distinct_model_acceses=%d\n"
	       "avg_num_coordinates_accessed_per_datapoint=%lf\n"
	       "max_coordinates_accessed=%d\n"
	       "min_coordinates_accessed=%d\n",
	       (int)datapoints.size(),
	       n_total_coordinate_accesses,
	       n_distinct_model_accesses,
	       avg_num_coordinates_accessed_per_datapoint,
	       max_coordinates_accessed_per_datapoint,
	       min_coordinates_accessed_per_datapoint);
    }
 public:
    CacheEfficientHogwildTrainer() {}
    ~CacheEfficientHogwildTrainer() {}

    // Partition datapoints with the flag-selected cache partitioner, then
    // run FLAGS_n_epochs Hogwild epochs (one OpenMP thread per partition).
    // Exits if no partitioning strategy flag was provided.
    TrainStatistics Train(Model *model, const std::vector<Datapoint *> & datapoints, Updater *updater) override {
	// Print some stats.
	PrintStatsAboutProblem(datapoints);

	// Partition.
	Timer partition_timer;
	DatapointPartitions partitions(FLAGS_n_threads);
	if (FLAGS_dfs_cache_partitioner) {
	    DFSCachePartitioner partitioner;
	    partitions = partitioner.Partition(datapoints, FLAGS_n_threads);
	}
	else if (FLAGS_greedy_cache_partitioner) {
	    GreedyCachePartitioner partitioner;
	    partitions = partitioner.Partition(datapoints, FLAGS_n_threads);
	}
	else {
	    std::cout << "CacheEfficientHogwildTrainer.h: No partitioning method selected" << std::endl;
	    exit(0);
	}
	if (FLAGS_print_partition_time) {
	    this->PrintPartitionTime(partition_timer);
	}

	model->SetUpWithPartitions(partitions);
	updater->SetUpWithPartitions(partitions);

	TrainStatistics stats;

	// Train: each thread walks its own batches; updates are lock-free.
#pragma omp parallel for schedule(static, 1)
	for (int thread = 0; thread < FLAGS_n_threads; thread++) {
	}
	Timer gradient_timer;
	for (int epoch = 0; epoch < FLAGS_n_epochs; epoch++) {
	    this->EpochBegin(epoch, gradient_timer, model, datapoints, &stats);
	    updater->EpochBegin();
#pragma omp parallel for schedule(static, 1)
	    for (int thread = 0; thread < FLAGS_n_threads; thread++) {
		for (int batch = 0; batch < partitions.NumBatches(); batch++) {
		    for (int index = 0; index < partitions.NumDatapointsInBatch(thread, batch); index++) {
			updater->Update(model, partitions.GetDatapoint(thread, batch, index));
		    }
		}
	    }
	    updater->EpochFinish();
	}
	return stats;
    }
};
#endif
|
solver-omp-op1.c | #define lowerb(id, p, n) ( id * (n/p) + (id < (n%p) ? id : n%p) )
#define numElem(id, p, n) ( (n/p) + (id < (n%p)) )
#define upperb(id, p, n) ( lowerb(id, p, n) + numElem(id, p, n) - 1 )
#define min(a, b) ( (a < b) ? a : b )
#define max(a, b) ( (a > b) ? a : b )
#include "omp.h"
// Function to copy one matrix into another
// Copy matrix u into v.  Interior rows [1, sizex-2] are block-partitioned
// across the OpenMP team; columns [1, sizey-2] are copied per row.
void copy_mat (double *u, double *v, unsigned sizex, unsigned sizey) {
    #pragma omp parallel
    {
	// BUG FIX: omp_get_num_threads() must be called *inside* the
	// parallel region.  Outside it returns 1, so the block partition
	// degenerated: thread 0 copied the whole matrix while every other
	// thread computed an empty range.
	int numprocs = omp_get_num_threads();
	int myid = omp_get_thread_num();
	int i_start = lowerb(myid, numprocs, sizex);
	int i_end = upperb(myid, numprocs, sizex);
	for (int i=max(1, i_start); i<=min(sizex-2, i_end); i++) {
	    for (int j=1; j<=sizey-2; j++)
		v[i*sizey+j] = u[i*sizey+j];
	}
    }
}
// 1D-blocked Jacobi solver: one iteration step
/* One Jacobi iteration: writes relaxed values into utmp and returns the
 * accumulated squared residual.  This "op1" variant distributes elements
 * cyclically by linear index modulo nblocks. */
double relax_jacobi (double *u, double *utmp, unsigned sizex, unsigned sizey) {
    double diff, sum=0.0;
    int nblocks = 4;
    /* NOTE(review): omp_get_num_threads() outside a parallel region
     * returns 1, so numprocs == 1 here regardless of the team size used
     * below -- confirm this is the intended variant behavior. */
    int numprocs = omp_get_num_threads();
    #pragma omp parallel private(diff) reduction(+: sum)
    {
	int myid = omp_get_thread_num();
	int i_start = lowerb(myid, numprocs, sizex);
	/* Upper bound of the *last* block: every thread scans to the end
	 * of the row range and filters elements cyclically below. */
	int i_end = upperb(numprocs-1, numprocs, sizex);
	for (int i=max(1, i_start); i<=i_end; i++) {
	    for (int j=1; j<=sizey-2; j++)
	    { int index = i*sizey+j;
		/* Cyclic ownership: thread myid updates elements whose
		 * linear index is congruent to myid mod nblocks.
		 * NOTE(review): threads with myid >= nblocks own nothing,
		 * and i can reach sizex-1 so u[(i+1)*sizey+j] may read one
		 * row past the array -- the baseline solver bounds i by
		 * min(sizex-2, i_end); confirm intent. */
		if(index%nblocks == myid)
		{
		    utmp[index] = 0.25 * ( u[ i*sizey + (j-1) ] + // left
			u[ i*sizey + (j+1) ] + // right
			u[ (i-1)*sizey + j ] + // top
			u[ (i+1)*sizey + j ] ) ;// bottom
		    diff = utmp[i*sizey+j] - u[i*sizey + j];
		    sum += diff * diff;  /* squared residual, reduced over threads */
		}
	    }
	}
    }
    return sum;
}
// 2D-blocked Gauss-Seidel solver: one iteration step
/* One Gauss-Seidel iteration (in-place update of u); returns the
 * accumulated squared residual.  The domain is split into a
 * numprocs x numprocs grid of 2D tiles executed as a pipelined wavefront:
 * ordered(2) with sink/source dependences makes tile (r,c) wait for tile
 * (r-1,c), preserving Gauss-Seidel ordering across row bands. */
double relax_gauss (double *u, unsigned sizex, unsigned sizey) {
    double unew, diff, sum=0.0;
    int numprocs=omp_get_max_threads();
    #pragma omp parallel for ordered(2) private(unew,diff) reduction(+:sum)
    for (int r = 0; r < numprocs; ++r) {
	for (int c = 0; c < numprocs; ++c) {
	    int r_start = lowerb(r, numprocs, sizex);
	    int r_end = upperb(r, numprocs, sizex);
	    int c_start = lowerb(c, numprocs, sizey);
	    int c_end = upperb(c, numprocs, sizey);
	    /* Block until the tile directly above has completed. */
	    #pragma omp ordered depend(sink: r-1, c)
	    for (int i=max(1, r_start); i<= min(sizex-2, r_end); i++) {
		for (int j=max(1, c_start); j<= min(sizey-2,c_end); j++) {
		    unew= 0.25 * ( u[ i*sizey + (j-1) ]+ // left
			u[ i*sizey + (j+1) ]+ // right
			u[ (i-1)*sizey + j ]+ // top
			u[ (i+1)*sizey + j ]); // bottom
		    diff = unew - u[i*sizey+ j];
		    sum += diff * diff;  /* residual before the in-place write */
		    u[i*sizey+j]=unew;
		}
	    }
	    /* Mark this tile complete for dependent tiles. */
	    #pragma omp ordered depend(source)
	}
    }
    return sum;
}
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * Returns 1 if the difference is negative (x earlier than y), else 0.
 * Note: *y is used as scratch and may be modified while normalizing. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y when x has fewer microseconds. */
    if (x->tv_usec < y->tv_usec)
    {
	int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
	y->tv_usec -= 1000000 * borrow;
	y->tv_sec += borrow;
    }
    /* Carry excess microseconds out of the difference. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
	int carry = (x->tv_usec - y->tv_usec) / 1000000;
	y->tv_usec += 1000000 * carry;
	y->tv_sec -= carry;
    }
    /* After normalization tv_usec of the result is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-4 (25-point) 3D variable-coefficient stencil.
 * Usage: prog [Nx Ny Nz [Nt]] -- interior size per axis and time steps.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    /* Problem size (+8 = 4-deep halo on both sides of each axis) and time
     * steps.  Defaults fix the original code reading Nx/Ny/Nz/Nt
     * uninitialized (undefined behavior) when arguments were missing. */
    int Nx = 64 + 8, Ny = 64 + 8, Nz = 64 + 8;
    int Nt = 10;
    if (argc > 3) {
	Nx = atoi(argv[1])+8;
	Ny = atoi(argv[2])+8;
	Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
	Nt = atoi(argv[4]);

    // allocate the arrays: two time planes A[2][Nz][Ny][Nx]
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2;m++){
	A[m] = (double ***) malloc(sizeof(double**)*Nz);
	for(i=0; i<Nz; i++){
	    A[m][i] = (double**) malloc(sizeof(double*)*Ny);
	    for(j=0;j<Ny;j++){
		A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
	    }
	}
    }
    // 13 coefficient arrays: center + 4 offsets per axis (axis-symmetric)
    double ****coef = (double ****) malloc(sizeof(double***)*13);
    for(m=0; m<13;m++){
	coef[m] = (double ***) malloc(sizeof(double**)*Nz);
	for(i=0; i<Nz; i++){
	    coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
	    for(j=0;j<Ny;j++){
		coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
	    }
	}
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 24;
    tile_size[3] = 1024;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;

    /* Initialize *every* cell of both time planes.  The original loops
     * started at index 1 and only filled plane 0, leaving cells that the
     * stencil reads (index 0 boundaries and the whole A[1] halo at t=1)
     * uninitialized -- undefined behavior. */
    srand(42);
    for (i = 0; i < Nz; i++) {
	for (j = 0; j < Ny; j++) {
	    for (k = 0; k < Nx; k++) {
		A[0][i][j][k] = 1.0 * (rand() % BASE);
		A[1][i][j][k] = A[0][i][j][k];
	    }
	}
    }
    for (m=0; m<13; m++) {
	for (i=0; i<Nz; i++) {
	    for (j=0; j<Ny; j++) {
		for (k=0; k<Nx; k++) {
		    coef[m][i][j][k] = 1.0 * (rand() % BASE);
		}
	    }
	}
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
    #pragma omp parallel
    {
	LIKWID_MARKER_THREADINIT;
	#pragma omp barrier
	LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
	gettimeofday(&start, 0);
	// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
	for (t = 0; t < Nt; t++) {
	    for (i = 4; i < Nz-4; i++) {
		for (j = 4; j < Ny-4; j++) {
		    for (k = 4; k < Nx-4; k++) {
			A[(t+1)%2][i][j][k] =
			    coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
			    coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
			    coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
			    coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
			    coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
			    coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
			    coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
			    coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
			    coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
			    coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
			    coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
			    coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
			    coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
		    }
		}
	    }
	}
#pragma endscop
	gettimeofday(&end, 0);
	ts_return = timeval_subtract(&result, &end, &start);
	tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
	/* Use the MIN macro defined in this file (lowercase `min` is not
	 * defined here). */
	min_tdiff = MIN(min_tdiff, tdiff);
	printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
    #pragma omp parallel
    {
	LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (including the top-level pointer arrays and
    // tile_size, which the original leaked)
    for(i=0; i<Nz; i++){
	for(j=0;j<Ny;j++){
	    free(A[0][i][j]);
	    free(A[1][i][j]);
	}
	free(A[0][i]);
	free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for(m=0; m<13;m++){
	for(i=0; i<Nz; i++){
	    for(j=0;j<Ny;j++){
		free(coef[m][i][j]);
	    }
	    free(coef[m][i]);
	}
	free(coef[m]);
    }
    free(coef);
    free(tile_size);
    return 0;
}
|
bml_multiply_ellsort_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_add.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_multiply.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_add_ellsort.h"
#include "bml_allocate_ellsort.h"
#include "bml_multiply_ellsort.h"
#include "bml_types_ellsort.h"
#include <complex.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Matrix multiply.
*
* \f$ C \leftarrow \alpha A \, B + \beta C \f$
*
* \ingroup multiply_group
*
* \param A Matrix A
* \param B Matrix B
* \param C Matrix C
* \param alpha Scalar factor multiplied by A * B
* \param beta Scalar factor multiplied by C
* \param threshold Used for sparse multiply
*/
void TYPED_FUNC(
    bml_multiply_ellsort) (
    bml_matrix_ellsort_t * A,
    bml_matrix_ellsort_t * B,
    bml_matrix_ellsort_t * C,
    double alpha,
    double beta,
    double threshold)
{
    double ONE = 1.0;
    double ZERO = 0.0;
    void *trace = NULL;         /* trace pair returned by the X^2 kernel (may stay NULL) */

    if (A == NULL || B == NULL)
    {
        /* NOTE(review): this assumes LOG_ERROR does not return; if it
         * does, the NULL operands are dereferenced below -- confirm. */
        LOG_ERROR("Either matrix A or B are NULL\n");
    }
    /* Fast path: C <- A*A with unit alpha and zero beta writes straight
     * into C, skipping the temporary and the add. */
    if (A == B && alpha == ONE && beta == ZERO)
    {
        trace = TYPED_FUNC(bml_multiply_x2_ellsort) (A, C, threshold);
    }
    else
    {
        /* General path: form A*B (or A*A) in temporary A2, then combine
         * as C <- alpha*A2 + beta*C via the ellsort add kernel. */
        bml_matrix_dimension_t matrix_dimension = { C->N, C->N, C->M };
        bml_matrix_ellsort_t *A2 =
            TYPED_FUNC(bml_noinit_matrix_ellsort) (matrix_dimension,
                                                   A->distribution_mode);
        if (A != NULL && A == B)
        {
            trace = TYPED_FUNC(bml_multiply_x2_ellsort) (A, A2, threshold);
        }
        else
        {
            TYPED_FUNC(bml_multiply_AB_ellsort) (A, B, A2, threshold);
        }

#ifdef DO_MPI
        /* Gather the distributed row blocks before the add. */
        if (bml_getNRanks() > 1 && A2->distribution_mode == distributed)
        {
            bml_allGatherVParallel(A2);
        }
#endif

        TYPED_FUNC(bml_add_ellsort) (C, A2, beta, alpha, threshold);
        bml_deallocate_ellsort(A2);
    }
    bml_free_memory(trace);     /* trace is NULL on the AB path */
}
/** Matrix multiply.
*
* \f$ X^{2} \leftarrow X \, X \f$
*
* \ingroup multiply_group
*
* \param X Matrix X
* \param X2 Matrix X2
* \param threshold Used for sparse multiply
*/
void *TYPED_FUNC(
    bml_multiply_x2_ellsort) (
    bml_matrix_ellsort_t * X,
    bml_matrix_ellsort_t * X2,
    double threshold)
{
    int *X_localRowMin = X->domain->localRowMin;
    int *X_localRowMax = X->domain->localRowMax;
    int X_N = X->N;
    int X_M = X->M;
    int *X_index = X->index;
    int *X_nnz = X->nnz;

    int X2_N = X2->N;
    int X2_M = X2->M;
    int *X2_index = X2->index;
    int *X2_nnz = X2->nnz;

    REAL_T traceX = 0.0;        /* accumulates Tr(X) */
    REAL_T traceX2 = 0.0;       /* accumulates Tr(X^2) */
    REAL_T *X_value = (REAL_T *) X->value;
    REAL_T *X2_value = (REAL_T *) X2->value;

    /* Returned buffer: trace[0] = Tr(X), trace[1] = Tr(X^2).
     * Ownership passes to the caller, who must free it. */
    double *trace = bml_allocate_memory(sizeof(double) * 2);

    int myRank = bml_getMyRank();

    /* Per-row scratch for the sparse row product:
     *   x[k]  - dense accumulator for column k,
     *   jx[l] - compressed list of touched columns,
     *   ix[k] - marker so each column enters jx only once.
     * IBM compilers cannot put VLAs in firstprivate, hence the two
     * declaration variants. */
#if !(defined(__IBMC__) || defined(__ibmxl__))
    int ix[X_N], jx[X_N];
    REAL_T x[X_N];

    memset(ix, 0, X_N * sizeof(int));
    memset(jx, 0, X_N * sizeof(int));
    memset(x, 0.0, X_N * sizeof(REAL_T));
#endif

#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
    shared(X_N, X_M, X_index, X_nnz, X_value, myRank) \
    shared(X2_N, X2_M, X2_index, X2_nnz, X2_value) \
    shared(X_localRowMin, X_localRowMax) \
    reduction(+: traceX, traceX2)
#else
#pragma omp parallel for \
    shared(X_N, X_M, X_index, X_nnz, X_value, myRank) \
    shared(X2_N, X2_M, X2_index, X2_nnz, X2_value) \
    shared(X_localRowMin, X_localRowMax) \
    firstprivate(ix, jx, x) \
    reduction(+: traceX, traceX2)
#endif

    //for (int i = 0; i < X_N; i++)  // CALCULATES THRESHOLDED X^2
    /* Only this rank's row range is computed. */
    for (int i = X_localRowMin[myRank]; i < X_localRowMax[myRank]; i++) // CALCULATES THRESHOLDED X^2
    {

#if defined(__IBMC__) || defined(__ibmxl__)
        int ix[X_N], jx[X_N];
        REAL_T x[X_N];

        memset(ix, 0, X_N * sizeof(int));
#endif

        int l = 0;              /* number of distinct columns touched in row i */
        for (int jp = 0; jp < X_nnz[i]; jp++)
        {
            REAL_T a = X_value[ROWMAJOR(i, jp, X_N, X_M)];
            int j = X_index[ROWMAJOR(i, jp, X_N, X_M)];
            if (j == i)
            {
                traceX = traceX + a;    /* diagonal entry of X */
            }
            /* Scatter a * row(j) of X into the dense accumulator x[]. */
            for (int kp = 0; kp < X_nnz[j]; kp++)
            {
                int k = X_index[ROWMAJOR(j, kp, X_N, X_M)];
                if (ix[k] == 0)
                {
                    x[k] = 0.0;
                    //X2_index[ROWMAJOR(i, l, N, M)] = k;
                    jx[l] = k;
                    ix[k] = i + 1;      /* mark column k as present */
                    l++;
                }
                // TEMPORARY STORAGE VECTOR LENGTH FULL N
                x[k] = x[k] + a * X_value[ROWMAJOR(j, kp, X_N, X_M)];
            }
        }

        // Check for number of non-zeroes per row exceeded
        if (l > X2_M)
        {
            LOG_ERROR("Number of non-zeroes per row > M, Increase M\n");
        }

        /* Gather: keep the diagonal unconditionally and off-diagonals
         * above threshold; reset scratch for the next row. */
        int ll = 0;
        for (int j = 0; j < l; j++)
        {
            //int jp = X2_index[ROWMAJOR(i, j, N, M)];
            int jp = jx[j];
            REAL_T xtmp = x[jp];
            if (jp == i)
            {
                traceX2 = traceX2 + xtmp;       /* diagonal entry of X^2 */
                X2_value[ROWMAJOR(i, ll, X2_N, X2_M)] = xtmp;
                X2_index[ROWMAJOR(i, ll, X2_N, X2_M)] = jp;
                ll++;
            }
            else if (is_above_threshold(xtmp, threshold))
            {
                X2_value[ROWMAJOR(i, ll, X2_N, X2_M)] = xtmp;
                X2_index[ROWMAJOR(i, ll, X2_N, X2_M)] = jp;
                ll++;
            }
            ix[jp] = 0;
            x[jp] = 0.0;
        }
        X2_nnz[i] = ll;
    }

    trace[0] = traceX;
    trace[1] = traceX2;

    return trace;
}
/** Matrix multiply.
*
* \f$ C \leftarrow B \, A \f$
*
* \ingroup multiply_group
*
* \param A Matrix A
* \param B Matrix B
* \param C Matrix C
* \param threshold Used for sparse multiply
*/
void TYPED_FUNC(
    bml_multiply_AB_ellsort) (
    bml_matrix_ellsort_t * A,
    bml_matrix_ellsort_t * B,
    bml_matrix_ellsort_t * C,
    double threshold)
{
    int A_N = A->N;
    int A_M = A->M;
    int *A_nnz = A->nnz;
    int *A_index = A->index;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;

    int B_N = B->N;
    int B_M = B->M;
    int *B_nnz = B->nnz;
    int *B_index = B->index;

    int C_N = C->N;
    int C_M = C->M;
    int *C_nnz = C->nnz;
    int *C_index = C->index;

    REAL_T *A_value = (REAL_T *) A->value;
    REAL_T *B_value = (REAL_T *) B->value;
    REAL_T *C_value = (REAL_T *) C->value;

    int myRank = bml_getMyRank();

    /* Per-row scratch (see x2 kernel): x[] dense accumulator, jx[]
     * compressed column list, ix[] presence marker.  IBM compilers
     * cannot firstprivate VLAs, hence the two variants. */
#if !(defined(__IBMC__) || defined(__ibmxl__))
    int ix[C->N], jx[C->N];
    REAL_T x[C->N];

    memset(ix, 0, C->N * sizeof(int));
    memset(jx, 0, C->N * sizeof(int));
    memset(x, 0.0, C->N * sizeof(REAL_T));
#endif

#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
    shared(A_N, A_M, A_nnz, A_index, A_value) \
    shared(A_localRowMin, A_localRowMax) \
    shared(B_N, B_M, B_nnz, B_index, B_value) \
    shared(C_N, C_M, C_nnz, C_index, C_value) \
    shared(myRank)
#else
#pragma omp parallel for \
    shared(A_N, A_M, A_nnz, A_index, A_value) \
    shared(A_localRowMin, A_localRowMax) \
    shared(B_N, B_M, B_nnz, B_index, B_value) \
    shared(C_N, C_M, C_nnz, C_index, C_value) \
    shared(myRank) \
    firstprivate(ix, jx, x)
#endif

    //for (int i = 0; i < A_N; i++)
    /* Only this rank's row range is computed. */
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {

#if defined(__IBMC__) || defined(__ibmxl__)
        int ix[C_N], jx[C_N];
        REAL_T x[C_N];

        memset(ix, 0, C_N * sizeof(int));
#endif

        int l = 0;              /* number of distinct columns touched in row i */
        for (int jp = 0; jp < A_nnz[i]; jp++)
        {
            REAL_T a = A_value[ROWMAJOR(i, jp, A_N, A_M)];
            int j = A_index[ROWMAJOR(i, jp, A_N, A_M)];

            /* Scatter a * row(j) of B into the dense accumulator x[]. */
            for (int kp = 0; kp < B_nnz[j]; kp++)
            {
                int k = B_index[ROWMAJOR(j, kp, B_N, B_M)];
                if (ix[k] == 0)
                {
                    x[k] = 0.0;
                    //C_index[ROWMAJOR(i, l, N, M)] = k;
                    jx[l] = k;
                    ix[k] = i + 1;      /* mark column k as present */
                    l++;
                }
                // TEMPORARY STORAGE VECTOR LENGTH FULL N
                x[k] = x[k] + a * B_value[ROWMAJOR(j, kp, B_N, B_M)];
            }
        }

        // Check for number of non-zeroes per row exceeded
        if (l > C_M)
        {
            LOG_ERROR("Number of non-zeroes per row > M, Increase M\n");
        }

        /* Gather: keep the diagonal unconditionally and off-diagonals
         * above threshold; reset scratch for the next row. */
        int ll = 0;
        for (int j = 0; j < l; j++)
        {
            //int jp = C_index[ROWMAJOR(i, j, N, M)];
            int jp = jx[j];
            REAL_T xtmp = x[jp];
            if (jp == i)
            {
                C_value[ROWMAJOR(i, ll, C_N, C_M)] = xtmp;
                C_index[ROWMAJOR(i, ll, C_N, C_M)] = jp;
                ll++;
            }
            else if (is_above_threshold(xtmp, threshold))
            {
                C_value[ROWMAJOR(i, ll, C_N, C_M)] = xtmp;
                C_index[ROWMAJOR(i, ll, C_N, C_M)] = jp;
                ll++;
            }
            ix[jp] = 0;
            x[jp] = 0.0;
        }
        C_nnz[i] = ll;
    }
}
/** Matrix multiply with threshold adjustment.
*
* \f$ C \leftarrow B \, A \f$
*
* \ingroup multiply_group
*
* \param A Matrix A
* \param B Matrix B
* \param C Matrix C
* \param threshold Used for sparse multiply
*/
void TYPED_FUNC(
    bml_multiply_adjust_AB_ellsort) (
    bml_matrix_ellsort_t * A,
    bml_matrix_ellsort_t * B,
    bml_matrix_ellsort_t * C,
    double threshold)
{
    int A_N = A->N;
    int A_M = A->M;
    int *A_nnz = A->nnz;
    int *A_index = A->index;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;

    int B_N = B->N;
    int B_M = B->M;
    int *B_nnz = B->nnz;
    int *B_index = B->index;

    int C_N = C->N;
    int C_M = C->M;
    int *C_nnz = C->nnz;
    int *C_index = C->index;

    int aflag = 1;              /* set when any row overflows M entries; triggers a retry */

    REAL_T *A_value = (REAL_T *) A->value;
    REAL_T *B_value = (REAL_T *) B->value;
    REAL_T *C_value = (REAL_T *) C->value;
    REAL_T adjust_threshold = (REAL_T) threshold;   /* doubled on each retry */

    int myRank = bml_getMyRank();

    /* Per-row scratch (see x2 kernel): x[] dense accumulator, jx[]
     * compressed column list, ix[] presence marker.  IBM compilers
     * cannot firstprivate VLAs, hence the two variants. */
#if !(defined(__IBMC__) || defined(__ibmxl__))
    int ix[C->N], jx[C->N];
    REAL_T x[C->N];

    memset(ix, 0, C->N * sizeof(int));
    memset(jx, 0, C->N * sizeof(int));
    memset(x, 0.0, C->N * sizeof(REAL_T));
#endif

    /* Recompute the whole product with an ever-larger threshold until
     * every row fits within C_M stored entries. */
    while (aflag > 0)
    {
        aflag = 0;

#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
    shared(A_N, A_M, A_nnz, A_index, A_value) \
    shared(A_localRowMin, A_localRowMax) \
    shared(B_N, B_M, B_nnz, B_index, B_value) \
    shared(C_N, C_M, C_nnz, C_index, C_value) \
    shared(adjust_threshold, myRank) \
    reduction(+:aflag)
#else
#pragma omp parallel for \
    shared(A_N, A_M, A_nnz, A_index, A_value) \
    shared(A_localRowMin, A_localRowMax) \
    shared(B_N, B_M, B_nnz, B_index, B_value) \
    shared(C_N, C_M, C_nnz, C_index, C_value) \
    shared(adjust_threshold, myRank) \
    firstprivate(ix, jx, x) \
    reduction(+:aflag)
#endif

        //for (int i = 0; i < A_N; i++)
        /* Only this rank's row range is computed. */
        for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
        {

#if defined(__IBMC__) || defined(__ibmxl__)
            int ix[C_N], jx[C_N];
            REAL_T x[C_N];

            memset(ix, 0, C_N * sizeof(int));
#endif

            int l = 0;          /* number of distinct columns touched in row i */
            for (int jp = 0; jp < A_nnz[i]; jp++)
            {
                REAL_T a = A_value[ROWMAJOR(i, jp, A_N, A_M)];
                int j = A_index[ROWMAJOR(i, jp, A_N, A_M)];

                /* Scatter a * row(j) of B into the dense accumulator x[]. */
                for (int kp = 0; kp < B_nnz[j]; kp++)
                {
                    int k = B_index[ROWMAJOR(j, kp, B_N, B_M)];
                    if (ix[k] == 0)
                    {
                        x[k] = 0.0;
                        jx[l] = k;
                        ix[k] = i + 1;  /* mark column k as present */
                        l++;
                    }
                    // TEMPORARY STORAGE VECTOR LENGTH FULL N
                    x[k] = x[k] + a * B_value[ROWMAJOR(j, kp, B_N, B_M)];
                }
            }

            // Check for number of non-zeroes per row exceeded
            // Need to adjust threshold
            if (l > C_M)
            {
                aflag = 1;
            }

            /* Gather: diagonal unconditionally, off-diagonals above the
             * current adjust_threshold; reset scratch for the next row.
             * NOTE(review): this row is written even when l > C_M, so ll
             * can exceed C_M and write past the row's slots before the
             * doubled-threshold retry happens -- confirm this cannot
             * overflow C's storage. */
            int ll = 0;
            for (int j = 0; j < l; j++)
            {
                //int jp = C_index[ROWMAJOR(i, j, N, M)];
                int jp = jx[j];
                REAL_T xtmp = x[jp];
                // Diagonal elements are saved in first column
                if (jp == i)
                {
                    C_value[ROWMAJOR(i, ll, C_N, C_M)] = xtmp;
                    C_index[ROWMAJOR(i, ll, C_N, C_M)] = jp;
                    ll++;
                }
                else if (is_above_threshold(xtmp, adjust_threshold))
                {
                    C_value[ROWMAJOR(i, ll, C_N, C_M)] = xtmp;
                    C_index[ROWMAJOR(i, ll, C_N, C_M)] = jp;
                    ll++;
                }
                ix[jp] = 0;
                x[jp] = 0.0;
            }
            C_nnz[i] = ll;
        }

        /* Loosen the cutoff for the next attempt (if any row overflowed). */
        adjust_threshold *= (REAL_T) 2.0;
    }
}
|
DRB056-jacobi2d-tile-no.c | /**
* jacobi-2d-imper.c: This file is part of the PolyBench/C 3.2 test suite.
* Jacobi with array copying, no reduction. with tiling and nested SIMD.
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 20x1000. */
#include "polybench/jacobi-2d-imper.h"
/* Array initialization. */
/* Array initialization (polyhedral/tiled code auto-generated from the
 * PolyBench source).  Fills the n x n corner of A and B with the same
 * deterministic values the original untiled loops produced.  The nested
 * floor-division expressions compute tile bounds floord(n-1,16). */
static void init_array(int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
    //int i;
    //int j;
    {
        int c1;
        int c2;
        int c4;
        int c3;
        if (n >= 1) {
            /* NOTE(review): `parallel for` appears on every loop level;
             * the inner pragmas are inert unless nested parallelism is
             * enabled -- presumably intentional in this generated
             * benchmark (DRB056 is a no-race case). */
            #pragma omp parallel for private(c1, c3, c4, c2)
            for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {
                #pragma omp parallel for private(c2, c3, c4)
                for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
                    #pragma omp parallel for private(c3, c4)
                    for (c3 = 16 * c2; c3 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c3++) {
                        #pragma omp parallel for private(c4)
                        for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c4++) {
                            /* c4 = row index i, c3 = column index j. */
                            A[c4][c3] = (((double )c4) * (c3 + 2) + 2) / n;
                            B[c4][c3] = (((double )c4) * (c3 + 3) + 3) / n;
                        }
                    }
                }
            }
        }
    }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump the n x n corner of A to stderr (keeps the data live for DCE and
 * doubles as a correctness check).  A newline is emitted after every
 * value whose linear index is a multiple of 20, plus a trailing one. */
static void print_array(int n,double A[500 + 0][500 + 0])
{
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            fprintf(stderr,"%0.2lf ",A[row][col]);
            if ((row * n + col) % 20 == 0) {
                fprintf(stderr,"\n");
            }
        }
    }
    fprintf(stderr,"\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
// Tiled, time-skewed Jacobi 2D sweep: for tsteps time steps, B receives
// the 5-point average of A over the interior of the n-by-n grid and A is
// copied back from B.  This nest is the output of a polyhedral compiler:
// c0 is the sequential wavefront over fused time/space tiles, c1/c2
// select a tile, c3 the time step within it, and c4/c5 the grid point.
// The enormous ?: chains are sign-safe integer floor/ceil divisions
// emitted by the code generator -- do not hand-simplify them.
static void kernel_jacobi_2d_imper(int tsteps,int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
//int t;
//int i;
//int j;
//#pragma scop
{
int c0;
int c1;
int c3;
int c2;
int c4;
int c5;
// the transformed iteration space is empty unless the grid has an
// interior (n >= 3) and at least one time step is requested
if (n >= 3 && tsteps >= 1) {
for (c0 = 0; c0 <= (((n + 3 * tsteps + -4) * 16 < 0?((16 < 0?-((-(n + 3 * tsteps + -4) + 16 + 1) / 16) : -((-(n + 3 * tsteps + -4) + 16 - 1) / 16))) : (n + 3 * tsteps + -4) / 16)); c0++) {
// tiles on the same c0 wavefront are independent -> parallel
#pragma omp parallel for private(c1, c5, c4, c2, c3)
for (c1 = (((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) > (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))?((2 * c0 * 3 < 0?-(-(2 * c0) / 3) : ((3 < 0?(-(2 * c0) + - 3 - 1) / - 3 : (2 * c0 + 3 - 1) / 3)))) : (((16 * c0 + -1 * tsteps + 1) * 16 < 0?-(-(16 * c0 + -1 * tsteps + 1) / 16) : ((16 < 0?(-(16 * c0 + -1 * tsteps + 1) + - 16 - 1) / - 16 : (16 * c0 + -1 * tsteps + 1 + 16 - 1) / 16))))); c1 <= (((((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) < c0?(((((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) < (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48))?(((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)) : (((32 * c0 + n + 29) * 48 < 0?((48 < 0?-((-(32 * c0 + n + 29) + 48 + 1) / 48) : -((-(32 * c0 + n + 29) + 48 - 1) / 48))) : (32 * c0 + n + 29) / 48)))) : c0)); c1++) {
for (c2 = ((((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) > 2 * c0 + -2 * c1?(((16 * c1 + -1 * n + -12) * 16 < 0?-(-(16 * c1 + -1 * n + -12) / 16) : ((16 < 0?(-(16 * c1 + -1 * n + -12) + - 16 - 1) / - 16 : (16 * c1 + -1 * n + -12 + 16 - 1) / 16)))) : 2 * c0 + -2 * c1); c2 <= (((((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) < (((32 * c0 + -32 * c1 + n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16))?(((((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) < (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16))?(((16 * c1 + n + 12) * 16 < 0?((16 < 0?-((-(16 * c1 + n + 12) + 16 + 1) / 16) : -((-(16 * c1 + n + 12) + 16 - 1) / 16))) : (16 * c1 + n + 12) / 16)) : (((n + 2 * tsteps + -3) * 16 < 0?((16 < 0?-((-(n + 2 * tsteps + -3) + 16 + 1) / 16) : -((-(n + 2 * tsteps + -3) + 16 - 1) / 16))) : (n + 2 * tsteps + -3) / 16)))) : (((32 * c0 + -32 * c1 + n + 29) * 16 < 0?((16 < 0?-((-(32 * c0 + -32 * c1 + n + 29) + 16 + 1) / 16) : -((-(32 * c0 + -32 * c1 + n + 29) + 16 - 
1) / 16))) : (32 * c0 + -32 * c1 + n + 29) / 16)))); c2++) {
// boundary peel: copy the last column of A for this tile (odd-n case)
// NOTE(review): (n + 1) % 2 == 0 means n is odd -- generated guard
if (c0 <= (((32 * c1 + 16 * c2 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 + 1) / 32) : -((-(32 * c1 + 16 * c2 + -1 * n + 1) + 32 - 1) / 32))) : (32 * c1 + 16 * c2 + -1 * n + 1) / 32)) && c1 <= c2 + -1) {
if ((n + 1) % 2 == 0) {
for (c4 = (16 * c1 > 16 * c2 + -1 * n + 3?16 * c1 : 16 * c2 + -1 * n + 3); c4 <= 16 * c1 + 15; c4++) {
A[-16 * c2 + c4 + n + -2][n + -2] = B[-16 * c2 + c4 + n + -2][n + -2];
}
}
}
// boundary peel: copy the last row of A for this tile (odd-n case)
if (c0 <= (((48 * c1 + -1 * n + 1) * 32 < 0?((32 < 0?-((-(48 * c1 + -1 * n + 1) + 32 + 1) / 32) : -((-(48 * c1 + -1 * n + 1) + 32 - 1) / 32))) : (48 * c1 + -1 * n + 1) / 32)) && c1 >= c2) {
if ((n + 1) % 2 == 0) {
for (c5 = (16 * c2 > 16 * c1 + -1 * n + 3?16 * c2 : 16 * c1 + -1 * n + 3); c5 <= ((16 * c1 < 16 * c2 + 15?16 * c1 : 16 * c2 + 15)); c5++) {
A[n + -2][-16 * c1 + c5 + n + -2] = B[n + -2][-16 * c1 + c5 + n + -2];
}
}
}
// intra-tile time loop: c3 is the (skewed) time coordinate
for (c3 = ((((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) > 16 * c0 + -16 * c1?(((((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) > (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2))))?(((16 * c1 + -1 * n + 2) * 2 < 0?-(-(16 * c1 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c1 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c1 + -1 * n + 2 + 2 - 1) / 2)))) : (((16 * c2 + -1 * n + 2) * 2 < 0?-(-(16 * c2 + -1 * n + 2) / 2) : ((2 < 0?(-(16 * c2 + -1 * n + 2) + - 2 - 1) / - 2 : (16 * c2 + -1 * n + 2 + 2 - 1) / 2)))))) : 16 * c0 + -16 * c1); c3 <= ((((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) < 16 * c0 + -16 * c1 + 15?((((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) < tsteps + -1?((8 * c1 + 6 < 8 * c2 + 6?8 * c1 + 6 : 8 * c2 + 6)) : tsteps + -1)) : 16 * c0 + -16 * c1 + 15)); c3++) {
// first-row update of B (i == 1), peeled from the main point loop
if (c1 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {
for (c5 = (16 * c2 > 2 * c3 + 1?16 * c2 : 2 * c3 + 1); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {
B[1][-2 * c3 + c5] = 0.2 * (A[1][-2 * c3 + c5] + A[1][-2 * c3 + c5 - 1] + A[1][1 + (-2 * c3 + c5)] + A[1 + 1][-2 * c3 + c5] + A[1 - 1][-2 * c3 + c5]);
}
}
// main point loops: fused B-update (5-point stencil) and A <- B copy
for (c4 = (16 * c1 > 2 * c3 + 2?16 * c1 : 2 * c3 + 2); c4 <= ((16 * c1 + 15 < 2 * c3 + n + -2?16 * c1 + 15 : 2 * c3 + n + -2)); c4++) {
if (c2 <= ((c3 * 8 < 0?((8 < 0?-((-c3 + 8 + 1) / 8) : -((-c3 + 8 - 1) / 8))) : c3 / 8))) {
B[-2 * c3 + c4][1] = 0.2 * (A[-2 * c3 + c4][1] + A[-2 * c3 + c4][1 - 1] + A[-2 * c3 + c4][1 + 1] + A[1 + (-2 * c3 + c4)][1] + A[-2 * c3 + c4 - 1][1]);
}
for (c5 = (16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -2?16 * c2 + 15 : 2 * c3 + n + -2)); c5++) {
B[-2 * c3 + c4][-2 * c3 + c5] = 0.2 * (A[-2 * c3 + c4][-2 * c3 + c5] + A[-2 * c3 + c4][-2 * c3 + c5 - 1] + A[-2 * c3 + c4][1 + (-2 * c3 + c5)] + A[1 + (-2 * c3 + c4)][-2 * c3 + c5] + A[-2 * c3 + c4 - 1][-2 * c3 + c5]);
A[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1] = B[-2 * c3 + c4 + -1][-2 * c3 + c5 + -1];
}
// trailing-column copy of A for this time step
if (c2 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {
A[-2 * c3 + c4 + -1][n + -2] = B[-2 * c3 + c4 + -1][n + -2];
}
}
// trailing-row copy of A for this time step
if (c1 >= (((2 * c3 + n + -16) * 16 < 0?-(-(2 * c3 + n + -16) / 16) : ((16 < 0?(-(2 * c3 + n + -16) + - 16 - 1) / - 16 : (2 * c3 + n + -16 + 16 - 1) / 16))))) {
for (c5 = (16 * c2 > 2 * c3 + 2?16 * c2 : 2 * c3 + 2); c5 <= ((16 * c2 + 15 < 2 * c3 + n + -1?16 * c2 + 15 : 2 * c3 + n + -1)); c5++) {
A[n + -2][-2 * c3 + c5 + -1] = B[n + -2][-2 * c3 + c5 + -1];
}
}
}
// peeled first-column B updates that fall outside the c3 loop above
if (c0 >= (((2 * c1 + c2 + -1) * 2 < 0?-(-(2 * c1 + c2 + -1) / 2) : ((2 < 0?(-(2 * c1 + c2 + -1) + - 2 - 1) / - 2 : (2 * c1 + c2 + -1 + 2 - 1) / 2)))) && c1 >= c2 + 1 && c2 <= (((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8))) {
for (c4 = 16 * c1; c4 <= ((16 * c1 + 15 < 16 * c2 + n + 12?16 * c1 + 15 : 16 * c2 + n + 12)); c4++) {
B[-16 * c2 + c4 + -14][1] = 0.2 * (A[-16 * c2 + c4 + -14][1] + A[-16 * c2 + c4 + -14][1 - 1] + A[-16 * c2 + c4 + -14][1 + 1] + A[1 + (-16 * c2 + c4 + -14)][1] + A[-16 * c2 + c4 + -14 - 1][1]);
}
}
// peeled first-row B updates that fall outside the c3 loop above
if (c0 >= (((3 * c1 + -1) * 2 < 0?-(-(3 * c1 + -1) / 2) : ((2 < 0?(-(3 * c1 + -1) + - 2 - 1) / - 2 : (3 * c1 + -1 + 2 - 1) / 2)))) && c1 <= (((((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) < c2?(((tsteps + -8) * 8 < 0?((8 < 0?-((-(tsteps + -8) + 8 + 1) / 8) : -((-(tsteps + -8) + 8 - 1) / 8))) : (tsteps + -8) / 8)) : c2))) {
for (c5 = (16 * c2 > 16 * c1 + 15?16 * c2 : 16 * c1 + 15); c5 <= ((16 * c2 + 15 < 16 * c1 + n + 12?16 * c2 + 15 : 16 * c1 + n + 12)); c5++) {
B[1][-16 * c1 + c5 + -14] = 0.2 * (A[1][-16 * c1 + c5 + -14] + A[1][-16 * c1 + c5 + -14 - 1] + A[1][1 + (-16 * c1 + c5 + -14)] + A[1 + 1][-16 * c1 + c5 + -14] + A[1 - 1][-16 * c1 + c5 + -14]);
}
}
}
}
}
}
}
//#pragma endscop
}
/* Driver: allocate two 500x500 double arrays through the polybench
 * helper, initialize them, time one run of the jacobi-2d kernel, and
 * print the result only under an impossible argc/argv condition (the
 * standard polybench trick to defeat dead-code elimination). */
int main(int argc,char **argv)
{
  /* Problem size is fixed at compile time for this variant. */
  int n = 500;
  int tsteps = 10;

  /* Heap-allocate the two grids. */
  double (*A)[500 + 0][500 + 0] =
      (double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double ))));
  double (*B)[500 + 0][500 + 0] =
      (double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double ))));

  init_array(n, *A, *B);

  /* Time only the kernel call (call and return included). */
  polybench_timer_start();
  kernel_jacobi_2d_imper(tsteps, n, *A, *B);
  polybench_timer_stop();
  polybench_timer_print();

  /* Never true in practice; keeps A live-out so the compiler cannot
   * delete the kernel. */
  if (argc > 42 && !strcmp(argv[0],""))
    print_array(n, *A);

  /* Be clean. */
  free((void *)A);
  free((void *)B);
  return 0;
}
|
GB_unop__identity_uint32_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_int16)
// op(A') function: GB (_unop_tran__identity_uint32_int16)
// C type: uint32_t
// A type: int16_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = (uint32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (uint32_t) Ax: apply the identity operator with an int16 -> uint32
// typecast over all anz entries, in parallel.  Cx and Ax may be aliased.
GrB_Info GB (_unop_apply__identity_uint32_int16)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b:
        // only entries flagged present in Ab are converted
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (!Ab [k]) continue ;
            int16_t aij = Ax [k] ;
            uint32_t z = (uint32_t) aij ;
            Cx [k] = z ;
        }
    }
    else
    {
        // full case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            int16_t aij = Ax [k] ;
            uint32_t z = (uint32_t) aij ;
            Cx [k] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (uint32_t) A': transpose A and typecast int16 -> uint32.  The
// actual transpose loops live in the included template, which consumes
// the GB_ATYPE/GB_CTYPE/GB_CAST_OP macros defined earlier in this file.
GrB_Info GB (_unop_tran__identity_uint32_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time via GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
simple_prof_c.c | /*
* Copyright (c) 2015 - 2022, Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <mpi.h>
#include <omp.h>
#include "geopm_prof.h"
#include "geopm_hint.h"
/* GEOPM profiling example: register one region ("loop_0"), enter it,
 * run an OpenMP-parallel reduction reporting per-iteration progress via
 * geopm_tprof_post, exit the region, and print the sum on rank 0.
 * Returns the first MPI/geopm error, or the MPI_Finalize result. */
int main(int argc, char **argv)
{
    int err = 0;
    int index = 0;
    int rank = 0;
    int num_iter = 100000000;
    double sum = 0.0;
    uint64_t region_id = 0;
    err = MPI_Init(&argc, &argv);
    if (!err) {
        /* FIX: the third argument had been corrupted to the mojibake
         * "(R)ion_id" (an "&reg;" HTML entity); it must be &region_id so
         * geopm can return the region handle. */
        err = geopm_prof_region("loop_0", GEOPM_REGION_HINT_UNKNOWN, &region_id);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    if (!err) {
        err = geopm_prof_enter(region_id);
    }
    if (!err) {
#pragma omp parallel default(shared) private(index)
        {
            /* Tell geopm how much total work this loop will report. */
            (void)geopm_tprof_init(num_iter);
#pragma omp for reduction(+:sum)
            for (index = 0; index < num_iter; ++index) {
                sum += (double)index;
                /* one unit of progress per iteration */
                (void)geopm_tprof_post();
            }
        }
        err = geopm_prof_exit(region_id);
    }
    if (!err) {
        err = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    }
    if (!err && !rank) {
        printf("sum = %e\n\n", sum);
    }
    /* Finalize unconditionally; report the first failure seen. */
    int tmp_err = MPI_Finalize();
    return err ? err : tmp_err;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two struct timeval values, normalizing so
 * that RESULT->tv_usec is non-negative.  Y is adjusted in place as part
 * of the normalization (its fields may be modified).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y when x has fewer microseconds. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds (more than one second) into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 8;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
launch.h | #include <atomic>
// Per-launch configuration mirroring a CUDA-style execution configuration.
typedef struct {
int gridSize; // Number of thread blocks per grid
int blockSize; // Number of threads per thread block
int smemSize; // Shared Memory Size
int stream; // associated stream
} launchConfig;
// kernels[i] is used only as an address for OpenMP depend clauses
// (never read or written as data).
// NOTE(review): this header never allocates `kernels`; the user must
// point it at storage covering all launched kernels -- confirm at the
// call site.
static int *kernels = nullptr;
// Monotonic count of kernels launched so far (next dependence slot).
static std::atomic<unsigned long> num_kernels = {0};
// Count of kernels already waited on by synchronize().
static std::atomic<unsigned long> synced_kernels = {0};
/// Kernel launch: enqueue `kernel` as an asynchronous OpenMP target
/// region.  The region is `nowait` with an `out` dependence on
/// kernels[kernel_no], which synchronize() later waits on.  ptrA/ptrB
/// are device pointers (is_device_ptr); gridSize/blockSize map to
/// num_teams/thread_limit.  smemSize and stream are currently unused.
template <typename Ty, typename Func, Func kernel, typename... Args>
void launch(const launchConfig &config, Ty *ptrA, Ty *ptrB, Args... args) {
// Atomically claim the next dependence slot.
int kernel_no = num_kernels++;
#pragma omp target teams is_device_ptr(ptrA, ptrB) num_teams(config.gridSize) \
    thread_limit(config.blockSize) depend(out \
                                          : kernels[kernel_no]) nowait
{
#pragma omp parallel
{ kernel(ptrA, ptrB, args...); }
}
}
/// Device Synchronization: block until every kernel launched so far has
/// completed.  Each undeferred empty task (`if(0)`) with an `in`
/// dependence on kernels[i] forces the encountering thread to wait for
/// the matching launch's `out` dependence.
void synchronize() {
unsigned long kernel_first = synced_kernels;
unsigned long kernel_last = num_kernels;
if (kernel_first < kernel_last) {
for (unsigned long i = kernel_first; i < kernel_last; ++i) {
#pragma omp task if(0) depend(in : kernels[i])
{}
}
// Publish the new watermark; if another thread raced ahead, the CAS
// fails and that thread's (not smaller) value stands.
synced_kernels.compare_exchange_strong(kernel_first, kernel_last);
}
}
|
ompHelloWorld.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
/* Spawn a fixed-size OpenMP team; every thread prints its rank and the
 * team size. */
int main (int argc, char **argv) {
  /* Number of threads OpenMP should use for the parallel region. */
  int NumThreads = 4;
  omp_set_num_threads(NumThreads);
#pragma omp parallel
  {
    printf("Hello World from thread %d of %d \n",
           omp_get_thread_num(), omp_get_num_threads());
  }
  return 0;
}
|
GB_binop__le_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_01__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_03__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_bool)
// A*D function (colscale): GB (_AxD__le_bool)
// D*A function (rowscale): GB (_DxB__le_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__le_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__le_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_bool)
// C=scalar+B GB (_bind1st__le_bool)
// C=scalar+B' GB (_bind1st_tran__le_bool)
// C=A+scalar GB (_bind2nd__le_bool)
// C=A'+scalar GB (_bind2nd_tran__le_bool)
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
bool bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_BOOL || GxB_NO_LE_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no mask, no accumulator.
// The loops live in the included template, which binds the le_bool
// type/operator macros defined earlier in this file.
GrB_Info GB (_Cdense_ewise3_noaccum__le_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time via GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, sliced
// into B_ntasks tasks over B_nthreads threads by B_ek_slicing.
GrB_Info GB (_Cdense_accumB__le_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C.
GrB_Info GB (_Cdense_accumb__le_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the return above always fires; harmless
// artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying
// the le_bool operator entry-wise (loops in the included template).
GrB_Info GB (_AxD__le_bool)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// output values of C, written by the template
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// le_bool operator entry-wise (loops in the included template).
GrB_Info GB (_DxB__le_bool)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// output values of C, written by the template
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the le_bool operator; entries in
// the union of the patterns of A and B.  The work decomposition comes
// in via TaskList/C_ntasks/C_nthreads; the template declares and frees
// the ek_slicing workspaces below.
GrB_Info GB (_AaddB__le_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces consumed by GB_add_template.c
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the le_bool
// operator; entries in the intersection of the patterns of A and B.
GrB_Info GB (_AemultB_01__le_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  flipxy asks for z = f(y,x) instead of z = f(x,y); since
// GB_BINOP_FLIP is 0 for le_bool, the flip has already been handled
// upstream and only the unflipped template is compiled here.
GrB_Info GB (_AemultB_02__le_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B when M is sparse/hyper and both A
// and B are bitmap/full; work is sliced over M's entries.
GrB_Info GB (_AemultB_03__le_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult, bitmap output: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// held as a bitmap matrix.
GrB_Info GB (_AemultB_bitmap__le_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x <= Bx [p]) for all bnz entries, binding the scalar x as
// the first operand.  Cx and Bx may be aliased; Bb (if non-NULL) marks
// which positions hold entries.
GrB_Info GB (_bind1st__le_bool)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // unpack the untyped arguments
    bool *Bx = (bool *) Bx_input ;
    bool *Cx = (bool *) Cx_output ;
    bool x = (*((bool *) x_input)) ;
    int64_t pB ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pB = 0 ; pB < bnz ; pB++)
    {
        // positions absent from the bitmap are left untouched
        if (!GBB (Bb, pB)) continue ;
        bool bij = GBX (Bx, pB, false) ;
        Cx [pB] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] <= y) for all anz entries, binding the scalar y as
// the second operand.  Cx and Ax may be aliased; Ab (if non-NULL) marks
// which positions hold entries.
GrB_Info GB (_bind2nd__le_bool)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // unpack the untyped arguments
    bool *Ax = (bool *) Ax_input ;
    bool *Cx = (bool *) Cx_output ;
    bool y = (*((bool *) y_input)) ;
    int64_t pA ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pA = 0 ; pA < anz ; pA++)
    {
        // positions absent from the bitmap are left untouched
        if (!GBB (Ab, pA)) continue ;
        bool aij = GBX (Ax, pA, false) ;
        Cx [pA] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x <= aij) via the
// GB_CAST_OP macro redefined just above this function.
GrB_Info GB (_bind1st_tran__le_bool)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bound first operand, read once before the transpose loops
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
// Computes C = (A' <= y), with the scalar y bound as the 2nd operand.  The
// transpose loop comes from the included template GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__le_bool)
(
GrB_Matrix C, // output matrix
const GrB_Matrix A, // input matrix, used transposed
const GB_void *y_input, // the scalar y, bound as the 2nd operand of the op
int64_t *restrict *Workspaces, // per-thread workspaces for the transpose
const int64_t *restrict A_slice, // how A is sliced across the threads
int nworkspaces, // number of workspaces
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image.  Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.
% Assignment defines the output image's color map and sets each pixel's
% color by reclassification in the reduced tree.  Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax.  If
% color components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of
% pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typedef declarations.
*/
/*
  A node of the color description tree.  Each node represents a cube in
  RGB(A) space; children subdivide that cube (see the file header).
*/
typedef struct _NodeInfo
{
struct _NodeInfo
*parent, // parent node in the color cube tree
*child[16]; // 8 children used for RGB; all 16 when alpha is associated
MagickSizeType
number_unique; // # of pixels classified exactly at this node ("n2")
DoublePixelPacket
total_color; // sum of the (scaled) colors of those pixels
MagickRealType
quantize_error; // approximate quantization error for this node ("E")
size_t
color_number, // index of this node's color in image->colormap
id, // which child slot of the parent this node occupies (0..15)
level; // depth of this node in the tree
} NodeInfo;
/*
  One bank of pre-allocated NodeInfo structures; banks form a singly-linked
  list so nodes can be handed out without a malloc per node.
*/
typedef struct _Nodes
{
NodeInfo
*nodes; // bank of nodes (presumably NodesInAList of them; allocated in
        // GetNodeInfo, which is not shown here -- TODO confirm)
struct _Nodes
*next; // next bank in the list
} Nodes;
/*
  State for one quantization run: the color description tree plus the
  scratch state used by classification, reduction, and assignment.
*/
typedef struct _CubeInfo
{
NodeInfo
*root; // root of the color description tree
size_t
colors, // # of colors currently represented (nodes with n2 > 0)
maximum_colors; // maximum # of colors allowed in the output image
ssize_t
transparent_index; // colormap index of the transparent color, or -1
MagickSizeType
transparent_pixels; // # of fully transparent pixels encountered
DoublePixelPacket
target; // color currently being matched by ClosestColor()
MagickRealType
distance, // best (smallest) squared distance found by ClosestColor()
pruning_threshold, // current pruning threshold "Ep" (see file header)
next_threshold; // minimum error of the surviving nodes; next "Ep"
size_t
nodes, // total # of nodes allocated in the tree
free_nodes, // # of unused nodes remaining in the current bank
color_number; // colormap index of the closest color found so far
NodeInfo
*next_node; // next free node in the current bank
Nodes
*node_queue; // list of node banks, for bulk allocation/teardown
MemoryInfo
*memory_info; // backing allocation for the cache below
ssize_t
*cache; // color cache -- presumably maps quantized RGB(A) to a
        // colormap index during dithering (see CacheShift); confirm
        // against DitherImage, which is not shown here
DoublePixelPacket
error[ErrorQueueLength]; // recent dither error terms (used by DitherImage)
MagickRealType
weights[ErrorQueueLength]; // weights applied to the error queue
QuantizeInfo
*quantize_info; // quantization settings for this run
MagickBooleanType
associate_alpha; // MagickTrue when alpha participates in the color cube
ssize_t
x, // current position, used during dithering
y;
size_t
depth; // maximum depth of the color tree
MagickOffsetType
offset; // progress offset for monitor callbacks
MagickSizeType
span; // total work span for progress reporting
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *),
SetGrayscaleImage(Image *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  const char
    *option;

  QuantizeInfo
    *quantize_info;

  /*
    Allocate a QuantizeInfo structure and initialize it to the defaults.
  */
  quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info));
  if (quantize_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  /*
    Inherit the dither and verbosity settings from the image info.
  */
  quantize_info->dither=image_info->dither;
  option=GetImageOption(image_info,"dither");
  if (option != (const char *) NULL)
    quantize_info->dither_method=(DitherMethod) ParseCommandOption(
      MagickDitherOptions,MagickFalse,option);
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
  const PixelPacket *pixel,DoublePixelPacket *alpha_pixel)
{
  MagickRealType
    alpha;

  /*
    Convert a pixel to a DoublePixelPacket, pre-multiplying the color
    channels by alpha when the cube associates alpha with color and the
    pixel is not fully opaque.
  */
  alpha_pixel->index=0;
  if ((cube_info->associate_alpha != MagickFalse) &&
      (pixel->opacity != OpaqueOpacity))
    {
      alpha=(MagickRealType) (QuantumScale*(QuantumRange-
        GetPixelOpacity(pixel)));
      alpha_pixel->red=alpha*GetPixelRed(pixel);
      alpha_pixel->green=alpha*GetPixelGreen(pixel);
      alpha_pixel->blue=alpha*GetPixelBlue(pixel);
      alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
      return;
    }
  /*
    Opaque pixel or no alpha association: copy the channels verbatim.
  */
  alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
  alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
  alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
  alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    blue_bit,
    green_bit,
    id,
    red_bit;

  /*
    Extract bit `index` of each scaled 8-bit channel and pack the bits into
    a child id: red in bit 0, green in bit 1, blue in bit 2, and -- when
    alpha is associated with the cube -- opacity in bit 3.
  */
  red_bit=(size_t) ((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >>
    index) & 0x01);
  green_bit=(size_t) ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >>
    index) & 0x01);
  blue_bit=(size_t) ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >>
    index) & 0x01);
  id=red_bit | (green_bit << 1) | (blue_bit << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=(size_t) (((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >>
      index) & 0x1) << 3);
  return(id);
}
static inline MagickBooleanType IsSameColor(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  /*
    Two pixels are the same color when all RGB channels agree and, for
    matte images, the opacity channels agree as well.
  */
  if (GetPixelRed(p) != GetPixelRed(q))
    return(MagickFalse);
  if (GetPixelGreen(p) != GetPixelGreen(q))
    return(MagickFalse);
  if (GetPixelBlue(p) != GetPixelBlue(q))
    return(MagickFalse);
  if ((image->matte != MagickFalse) &&
      (GetPixelOpacity(p) != GetPixelOpacity(q)))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  AssignImageColors() builds the image colormap from the pruned color tree
  (via DefineImageColormap) and reassigns each pixel to its closest
  colormap entry, either by dithering or by a direct parallel pass.
*/
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag "Assign/Image"
ssize_t
y;
/*
Allocate image colormap.
*/
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace);
else
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace);
if (AcquireImageColormap(image,cube_info->colors) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
// DefineImageColormap() rebuilds image->colors as it walks the tree
image->colors=0;
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
(void) DefineImageColormap(image,cube_info,cube_info->root);
/*
Create a reduced color image.
*/
if ((cube_info->quantize_info->dither != MagickFalse) &&
(cube_info->quantize_info->dither_method != NoDitherMethod))
(void) DitherImage(image,cube_info);
else
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
// each thread takes a private copy of the cube below, so the search
// state (target, distance, color_number) is not shared across rows
CubeInfo
cube;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
count;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
register const NodeInfo
*node_info;
register ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
// run-length: find how many following pixels share this color so the
// tree walk and closest-color search are done once per run
for (count=1; (x+count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,q,q+count) == MagickFalse)
break;
AssociateAlphaPixel(&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
// seed the search with a distance larger than any possible squared
// RGBA distance, so the first candidate always wins
cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
(QuantumRange+1.0)+1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(indexes+x+i,index);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q,image->colormap+index);
if (cube.associate_alpha != MagickFalse)
SetPixelOpacity(q,image->colormap[index].opacity);
}
q++;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_AssignImageColors)
#endif
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image);
if ((cube_info->quantize_info->number_colors == 2) &&
(cube_info->quantize_info->colorspace == GRAYColorspace))
{
double
intensity;
/*
Monochrome image.
*/
// snap the 2-entry colormap to pure black and white: the brighter of
// the two entries becomes white, the other black
intensity=0.0;
if ((image->colors > 1) &&
(GetPixelLuma(image,image->colormap+0) >
GetPixelLuma(image,image->colormap+1)))
intensity=(double) QuantumRange;
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,sRGBColorspace);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the classification phase for realistic values of
% Cmax.  If color components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing It updates the following data for each such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  /*
    Alpha participates in the color cube when the image has a matte
    channel, except for 2-color grayscale quantization where it does not.
  */
  cube_info->associate_alpha=image->matte;
  if ((cube_info->quantize_info->number_colors == 2) &&
      (cube_info->quantize_info->colorspace == GRAYColorspace))
    cube_info->associate_alpha=MagickFalse;
}
/*
  ClassifyImageColors() builds the color description tree for the image
  (see the method comment above).  The first pass classifies pixels at the
  full MaxTreeDepth; once the color count exceeds maximum_colors the tree
  is pruned and the remaining rows are classified at the reduced
  cube_info->depth in a second pass.  Returns MagickFalse if a row of
  pixels could not be read.
*/
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"
CacheView
*image_view;
DoublePixelPacket
error,
mid,
midpoint,
pixel;
MagickBooleanType
proceed;
MagickRealType
bisect;
NodeInfo
*node_info;
size_t
count,
id,
index,
level;
ssize_t
y;
/*
Classify the first cube_info->maximum_colors colors to a tree depth of 8.
*/
SetAssociatedAlpha(image,cube_info);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,
cube_info->quantize_info->colorspace);
else
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace((Image *) image,sRGBColorspace);
// the root node's cube is centered at mid-range in every channel
midpoint.red=(MagickRealType) QuantumRange/2.0;
midpoint.green=(MagickRealType) QuantumRange/2.0;
midpoint.blue=(MagickRealType) QuantumRange/2.0;
midpoint.opacity=(MagickRealType) QuantumRange/2.0;
// opacity error stays 0 unless alpha is associated with the cube
error.opacity=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
// run-length: classify a run of identical pixels in one descent
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,p,p+count) == MagickFalse)
break;
AssociateAlphaPixel(cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((MagickRealType) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= MaxTreeDepth; level++)
{
double
distance;
// each level halves the cube; track the center of the child cube
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
// NOTE(review): on allocation failure this continues the level
// loop without descending or decrementing index, so the next
// iteration retries the same child from the same parent
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
continue;
}
if (level == MaxTreeDepth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.opacity*error.opacity);
if (IsNaN(distance))
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(pixel.opacity);
else
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(OpaqueOpacity);
p+=count;
}
if (cube_info->colors > cube_info->maximum_colors)
{
// too many colors: prune to the cube depth and switch to the
// depth-limited second pass below for the remaining rows
PruneToCubeDepth(cube_info,cube_info->root);
break;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
// second pass: classify the remaining rows, but only down to
// cube_info->depth instead of MaxTreeDepth
for (y++; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,p,p+count) == MagickFalse)
break;
AssociateAlphaPixel(cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((MagickRealType) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= cube_info->depth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
image->filename);
continue;
}
if (level == cube_info->depth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.opacity*error.opacity);
if (IsNaN(distance))
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.opacity+=count*QuantumScale*ClampPixel(
pixel.opacity);
else
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(OpaqueOpacity);
p+=count;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,sRGBColorspace);
// y stops short of image->rows only if a pixel row could not be read
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /*
    Allocate a fresh QuantizeInfo with default settings, then overlay the
    caller's settings when a source structure was supplied.
  */
  clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info));
  if (clone_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
      clone_info->dither=quantize_info->dither;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->measure_error=quantize_info->measure_error;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
/*
  ClosestColor() recursively walks the subtree rooted at node_info and
  updates cube_info->distance / cube_info->color_number with the colormap
  entry closest to cube_info->target.
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
// 8 children per node for RGB; all 16 when alpha participates in the cube
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
ClosestColor(image,cube_info,node_info->child[i]);
// only nodes that classified pixels directly own a colormap entry
if (node_info->number_unique != 0)
{
MagickRealType
pixel;
register DoublePixelPacket
*magick_restrict q;
register MagickRealType
alpha,
beta,
distance;
register PixelPacket
*magick_restrict p;
/*
Determine if this color is "closest".
*/
p=image->colormap+node_info->color_number;
q=(&cube_info->target);
// with alpha association, compare alpha-weighted color channels
alpha=1.0;
beta=1.0;
if (cube_info->associate_alpha != MagickFalse)
{
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
}
// accumulate the squared distance one channel at a time, bailing out
// as soon as it already exceeds the best distance found so far
pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
distance=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
if (cube_info->associate_alpha != MagickFalse)
{
pixel=GetPixelAlpha(p)-GetPixelAlpha(q);
distance+=pixel*pixel;
}
if (distance <= cube_info->distance)
{
// new best match: remember it in the cube
cube_info->distance=distance;
cube_info->color_number=node_info->color_number;
}
}
}
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
  QuantizeInfo
    compress_info;

  /*
    Re-quantize a palette image to its own color count at maximum tree
    depth; this merges duplicate and unused colormap entries.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image,&image->exception) == MagickFalse)
    return(MagickFalse);  /* not a palette image: nothing to compress */
  GetQuantizeInfo(&compress_info);
  compress_info.number_colors=image->colors;
  compress_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&compress_info,image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the number
% of unique colors is not zero. DefineImageColormap() returns the number of
% colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;
  size_t
    number_children;
  /*
    Traverse any children (8-way, or 16-way when an alpha channel is
    associated) so every populated node contributes a colormap entry.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register MagickRealType
        alpha;
      register PixelPacket
        *magick_restrict q;
      /*
        Colormap entry is defined by the mean color in this cube:
        total_color accumulates normalized channel sums, so multiplying by
        alpha (the reciprocal of the unique-pixel count) yields the mean.
      */
      q=image->colormap+image->colors;
      alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          /* no alpha channel: store the mean RGB, fully opaque */
          SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.red)));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.green)));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.blue)));
          SetPixelOpacity(q,OpaqueOpacity);
        }
      else
        {
          MagickRealType
            opacity;
          /* store mean opacity first; the RGB handling depends on it */
          opacity=(MagickRealType) (alpha*QuantumRange*
            node_info->total_color.opacity);
          SetPixelOpacity(q,ClampToQuantum(opacity));
          if (q->opacity == OpaqueOpacity)
            {
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.blue)));
            }
          else
            {
              double
                gamma;
              /*
                Partially transparent entry: un-premultiply the channels by
                the mean coverage (gamma) before storing them.
              */
              gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity));
              gamma=PerceptibleReciprocal(gamma);
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.blue)));
              /* track the most-populated transparent entry for later reuse */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with the color cube.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes,
    *next;

  /*
    Release every pool of color-cube tree nodes, then the dither cache,
    the cloned quantize info, and finally the cube structure itself.
  */
  nodes=cube_info->node_queue;
  do
  {
    next=nodes->next;
    nodes->nodes=(NodeInfo *) RelinquishMagickMemory(nodes->nodes);
    nodes=(Nodes *) RelinquishMagickMemory(nodes);
    nodes=next;
  } while (nodes != (Nodes *) NULL);
  cube_info->node_queue=(Nodes *) NULL;
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Invalidate the signature before releasing the structure so stale
    pointers trip the sanity asserts; always returns NULL so callers can
    overwrite their handle in one statement.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  quantize_info->signature=(~MagickSignature);
  return((QuantizeInfo *) RelinquishMagickMemory(quantize_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% its corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  /*
    Release the per-thread error-diffusion buffers allocated by
    AcquirePixelThreadSet() and then the pointer array itself; returns
    NULL so callers can overwrite their handle in one statement.
  */
  assert(pixels != (DoublePixelPacket **) NULL);
  /* hoist the resource query out of the loop: the limit is loop-invariant */
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
DoublePixelPacket
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (DoublePixelPacket **) NULL)
return((DoublePixelPacket **) NULL);
(void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,
2*sizeof(**pixels));
if (pixels[i] == (DoublePixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))
  ssize_t
    offset;
  /*
    Map a pixel color to its slot in the dither cache: each channel is
    reduced to its (8-CacheShift) most significant bits and the reduced
    channels are packed into a single index (red in the low bits, then
    green, blue, and--when matting is active--opacity).
  */
  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity)));
  return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    **pixels;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error to neighboring pixels with serpentine-scan
    Floyd-Steinberg error diffusion (weights 7/16, 5/16, 3/16, 1/16).
    Returns MagickFalse when buffer allocation or a pixel-cache operation
    fails.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  exception=(&image->exception);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    cube=(*cube_info);
    /*
      Alternate between two scratch rows: 'current' collects this row's
      error, 'previous' holds error diffused from the row above.  The scan
      direction (v) reverses on odd rows (serpentine order).
    */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      u=(y & 0x01) ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(&cube,q+u,&pixel);
      if (x > 0)
        {
          pixel.red+=7*current[u-v].red/16;
          pixel.green+=7*current[u-v].green/16;
          pixel.blue+=7*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=7*current[u-v].opacity/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=previous[u+v].opacity/16;
            }
          pixel.red+=5*previous[u].red/16;
          pixel.green+=5*previous[u].green/16;
          pixel.blue+=5*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=5*previous[u].opacity/16;
          if (x > 0)
            {
              pixel.red+=3*previous[u-v].red/16;
              pixel.green+=3*previous[u-v].green/16;
              pixel.blue+=3*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=3*previous[u-v].opacity/16;
            }
        }
      pixel.red=(MagickRealType) ClampPixel(pixel.red);
      pixel.green=(MagickRealType) ClampPixel(pixel.green);
      pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            node_id;  /* renamed: previously shadowed the thread 'id' above */

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+
            1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(indexes+u,index);
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q+u,image->colormap+index);
          if (cube.associate_alpha != MagickFalse)
            SetPixelOpacity(q+u,image->colormap[index].opacity);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixel(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].opacity=pixel.opacity-color.opacity;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /* fix: propagate pixel-cache failures instead of returning MagickTrue */
  return(status);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int);
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction)
{
  /*
    Recursively trace a Hilbert curve over the image, dithering one pixel
    (via RiemersmaDither) at each step.  'direction' selects this cell's
    orientation and 'level' is the remaining recursion depth; the gravity
    constants name the direction the cursor moves next.
  */
  if (level == 1)
    /* base case: emit the three moves of a first-order Hilbert cell */
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        break;
      }
      default:
        break;
    }
  else
    /* recursive case: four sub-cells joined by three connecting moves */
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        break;
      }
      default:
        break;
    }
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction)
{
#define DitherImageTag "Dither/Image"
  DoublePixelPacket
    color,
    pixel;
  MagickBooleanType
    proceed;
  register CubeInfo
    *p;
  size_t
    index;
  /*
    Dither the single pixel at the cube's cursor (p->x, p->y): apply the
    weighted error queue, look up (or compute and cache) the closest
    colormap entry, write the result, and push the new error onto the
    queue.  Finally advance the cursor one step in 'direction'.  Cursor
    positions outside the image (curve overshoot) only move the cursor.
  */
  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      ExceptionInfo
        *exception;
      register IndexPacket
        *magick_restrict indexes;
      register PixelPacket
        *magick_restrict q;
      register ssize_t
        i;
      /*
        Distribute error: fold the weighted error queue into this pixel.
      */
      exception=(&image->exception);
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickFalse);
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      AssociateAlphaPixel(cube_info,q,&pixel);
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.opacity+=p->weights[i]*p->error[i].opacity;
      }
      pixel.red=(MagickRealType) ClampPixel(pixel.red);
      pixel.green=(MagickRealType) ClampPixel(pixel.green);
      pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;
          register size_t
            id;
          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children; the
            initial distance exceeds any possible squared RGBA distance.
          */
          p->target=pixel;
          p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) (1*p->cache[i]);
      if (image->storage_class == PseudoClass)
        *indexes=(IndexPacket) index;
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q,image->colormap+index);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelOpacity(q,image->colormap[index].opacity);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue (the
        queue shifts left by one; oldest error falls off the front).
      */
      (void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixel(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /* advance the Hilbert-curve cursor one step */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    extent;

  size_t
    depth;

  /*
    Dispatch to Floyd-Steinberg unless a Riemersma (Hilbert-curve) dither
    was explicitly requested.
  */
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info));
  /*
    Distribute quantization error along a Hilbert curve: clear the error
    queue, then choose a recursion depth whose curve covers the larger
    image dimension.
  */
  (void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength*
    sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  extent=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  depth=1;
  while (extent != 0)
  {
    extent>>=1;
    depth++;
  }
  if ((ssize_t) (1L << depth) <
      MagickMax((ssize_t) image->columns,(ssize_t) image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,&image->exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity);
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initialize the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
%      CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose a optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;
  MagickRealType
    sum,
    weight;
  register ssize_t
    i;
  size_t
    length;
  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) ResetMagickMemory(cube_info,0,sizeof(*cube_info));
  /* clamp the requested tree depth to [2, MaxTreeDepth] */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.  The root is made its own parent so traversals
    can follow parent links without a NULL check.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);  /* NOTE(review): cube_info leaks here -- confirm */
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither == MagickFalse)
    return(cube_info);
  /*
    Initialize dither resources: one cache slot for every possible packed
    4-channel value produced by CacheOffset().
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);  /* NOTE(review): cube_info leaks here too -- confirm */
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache (all bytes 0xFF, i.e. every slot negative ==
    "not yet computed").
  */
  (void) ResetMagickMemory(cube_info->cache,(-1),sizeof(*cube_info->cache)*
    length);
  /*
    Distribute weights along a curve of exponential decay (newest error
    entries weigh the most).
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors so they sum to exactly 1.0 (any
    floating-point residue is folded into the first weight).
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  /*
    Return a zero-initialized node from the current pool, allocating a new
    pool of NodesInAList nodes when the current one is exhausted.  Returns
    NULL on allocation failure.
  */
  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /* fix: the pool header was leaked on this failure path */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) ResetMagickMemory(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image)
%
% A description of each parameter follows.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickRealType
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    index;

  ssize_t
    y;

  /*
    Measure the total quantization error: for every pixel of a PseudoClass
    image, accumulate the absolute and squared per-channel distance between
    the reference pixel and its colormap entry.  Results are stored in
    image->error (mean per pixel, normalized mean square, normalized
    maximum square).  DirectClass images report zero error.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
  (void) ResetMagickMemory(&image->error,0,sizeof(image->error));
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    /*
      fix: this is a virtual (read-only) cache view, so the index queue
      must be fetched with the virtual accessor, not the authentic one.
    */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=1UL*GetPixelIndex(indexes+x);
      if (image->matte != MagickFalse)
        {
          /* weight each side by its normalized coverage when matting */
          alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          beta=(MagickRealType) (QuantumScale*(QuantumRange-
            image->colormap[index].opacity));
        }
      distance=fabs((double) (alpha*GetPixelRed(p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Reset the structure to its documented defaults: 256 colors, dithering
    enabled with the Riemersma method, colorspace undefined, error
    measurement off.  Zeroing first leaves every unlisted field (e.g.
    tree_depth) at zero.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->dither=MagickTrue;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->signature=MagickSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const MagickBooleanType dither)
% MagickBooleanType PosterizeImageChannel(Image *image,
% const ChannelType channel,const size_t levels,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const MagickBooleanType dither)
{
  /*
    Convenience wrapper: posterize all default channels of the image.
  */
  return(PosterizeImageChannel(image,DefaultChannels,levels,dither));
}
MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
  const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag  "Posterize/Image"
/*
  Map a quantum onto one of `levels` evenly spaced values; the MagickMax()
  term guards against a zero divisor when levels <= 1.
*/
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
  QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Posterize the colormap entries first when the image is PseudoClass.
    NOTE(review): `progress`/`status` in this pragma's shared list are not
    touched inside this loop — confirm whether the list is intentional.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,1,1)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    /*
      Posterize colormap.
    */
    if ((channel & RedChannel) != 0)
      image->colormap[i].red=PosterizePixel(image->colormap[i].red);
    if ((channel & GreenChannel) != 0)
      image->colormap[i].green=PosterizePixel(image->colormap[i].green);
    if ((channel & BlueChannel) != 0)
      image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
    if ((channel & OpacityChannel) != 0)
      image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
  }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
      /* opacity only when the image actually carries an alpha channel */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
      /* the index queue holds the black channel for CMYK images */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PosterizeImageChannel)
#endif
        proceed=SetImageProgress(image,PosterizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Finally quantize down to at most levels^3 colors, honoring the caller's
    dither flag.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither=dither;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  PruneChild() removes the given node (and, recursively, its children) from
  the color cube, folding its color statistics into its parent.
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.  The root node has no parent; guard
    against a NULL dereference when the pruning threshold reaches the root.
  */
  parent=node_info->parent;
  if (parent != (NodeInfo *) NULL)
    {
      parent->number_unique+=node_info->number_unique;
      parent->total_color.red+=node_info->total_color.red;
      parent->total_color.green+=node_info->total_color.green;
      parent->total_color.blue+=node_info->total_color.blue;
      parent->total_color.opacity+=node_info->total_color.opacity;
      parent->child[node_info->id]=(NodeInfo *) NULL;
    }
  cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  PruneLevel() merges every node at the bottom level of the color tree into
  its parent.
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    count;

  /*
    Recurse into every populated child, then prune this node if it sits at
    the cube's maximum depth.
  */
  count=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) count; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[id]);
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  PruneToCubeDepth() merges any node deeper than cube_info->depth into its
  parent.
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    count;

  /*
    Depth-first: handle descendants first so statistics percolate upward.
  */
  count=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) count; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[id]);
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
*/
/*
  DirectToColormapImage() promotes a DirectClass image to PseudoClass by
  giving every pixel its own colormap entry.  Callers ensure columns*rows
  does not exceed the colormap budget before invoking this fast path.
*/
static MagickBooleanType DirectToColormapImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    number_colors;

  ssize_t
    y;

  status=MagickTrue;
  number_colors=(size_t) (image->columns*image->rows);
  if (AcquireImageColormap(image,number_colors) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->colors != number_colors)
    return(MagickFalse);
  i=0;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      proceed;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;  /* cache failure must not report success */
        break;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Copy the pixel into its dedicated colormap slot and record the index.
      */
      image->colormap[i].red=GetPixelRed(q);
      image->colormap[i].green=GetPixelGreen(q);
      image->colormap[i].blue=GetPixelBlue(q);
      image->colormap[i].opacity=GetPixelOpacity(q);
      SetPixelIndex(indexes+x,i);
      i++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;  /* cache failure must not report success */
        break;
      }
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Clamp the requested number of colors to [1,MaxColormapSize].
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  if (image->matte == MagickFalse)
    {
      /*
        Fast paths: an image small enough to map pixels 1:1 into a colormap,
        or a grayscale image, avoids the octree classification below.
      */
      if ((image->columns*image->rows) <= maximum_colors)
        (void) DirectToColormapImage(image,&image->exception);
      if (SetImageGray(image,&image->exception) != MagickFalse)
        (void) SetGrayscaleImage(image);
    }
  if ((image->storage_class == PseudoClass) &&
      (image->colors <= maximum_colors))
    {
      /*
        Already within the color budget; only an optional colorspace
        transform remains.
      */
      if ((quantize_info->colorspace != UndefinedColorspace) &&
          (quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace(image,quantize_info->colorspace);
      return(MagickTrue);
    }
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if ((quantize_info->dither != MagickFalse) && (depth > 2))
        depth--;  /* dithering masks quantization error; shallower suffices */
      if ((image->matte != MagickFalse) && (depth > 5))
        depth--;  /* alpha doubles each node's children (8 -> 16) */
      if (SetImageGray(image,&image->exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image if it contains more than the
        maximum, otherwise we can disable dithering to improve the performance.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      else
        cube_info->quantize_info->dither_method=NoDitherMethod;
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images);
      return(status);
    }
  status=MagickFalse;
  /*
    Clamp the requested number of colors to [1,MaxColormapSize].
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither != MagickFalse)
        depth--;  /* dithering masks quantization error; shallower suffices */
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(&images->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  /*
    Phase 1: classify the colors of every image into the shared cube, with
    each image's own progress monitor suspended during classification.
  */
  number_images=GetImageListLength(images);
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,&image->exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Phase 2: reduce the shared cube to the color budget, then assign the
        resulting global colormap to every image in the sequence.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% MagickRealType *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
/*
  QuantizeErrorFlatten() copies the quantization error of this node and its
  subtree into quantize_error[] starting at `offset`, returning the number
  of entries written.  Writing stops once the array capacity
  (cube_info->nodes) would be exceeded.
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,
  MagickRealType *quantize_error)
{
  register ssize_t
    id;

  size_t
    children,
    count;

  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  /*
    Record this node's error, then append each child subtree after it.
  */
  quantize_error[offset]=node_info->quantize_error;
  count=1;
  children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      count+=QuantizeErrorFlatten(cube_info,node_info->child[id],offset+count,
        quantize_error);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  Reduce() prunes every node whose quantization error is at or below the
  current pruning threshold, and tracks the smallest surviving error as the
  threshold for the next pass.
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    count;

  /*
    Depth-first traversal: reduce every populated child first.
  */
  count=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) count; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[id]);
  if (node_info->quantize_error > cube_info->pruning_threshold)
    {
      /*
        Node survives this pass: count it if it holds unique colors, and
        remember the minimum surviving error.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
  else
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except at the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
/*
  qsort() callback: ascending order; values within MagickEpsilon of each
  other compare equal.
*/
static int MagickRealTypeCompare(const void *error_p,const void *error_q)
{
  MagickRealType
    p_value,
    q_value;

  p_value=(*(MagickRealType *) error_p);
  q_value=(*(MagickRealType *) error_q);
  if (p_value > q_value)
    return(1);
  if (fabs((double) (q_value-p_value)) <= MagickEpsilon)
    return(0);
  return(-1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag  "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  /*
    Repeatedly prune the tree, raising the pruning threshold each pass,
    until the number of unique-color nodes fits the color budget.
  */
  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      MagickRealType
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors.
      */
      quantize_error=(MagickRealType *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (MagickRealType *) NULL)
        {
          /*
            Flatten and sort all node errors, then seed the threshold so
            only about maximum_colors+10% nodes survive the first pass.
            (If allocation fails we simply fall back to the slow loop.)
          */
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(MagickRealType),
            MagickRealTypeCompare);
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(MagickRealType *) RelinquishMagickMemory(
            quantize_error);
        }
    }
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    /* start the next threshold above every node's error; Reduce() lowers it */
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest color from
% a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image)
{
  CubeInfo
    *cube;

  MagickBooleanType
    ok;

  /*
    Build a color cube from the reference image and use it to recolor the
    target image with the closest reference colors.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickSignature);
  cube=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  ok=ClassifyImageColors(cube,remap_image,&image->exception);
  if (ok != MagickFalse)
    {
      /*
        Assign the reference image's colors to the target image.
      */
      cube->quantize_info->number_colors=cube->colors;
      ok=AssignImageColors(image,cube);
    }
  DestroyCubeInfo(cube);
  return(ok);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image)
{
  CubeInfo
    *cube;

  Image
    *image;

  MagickBooleanType
    ok;

  /*
    Recolor every image in the sequence with the closest colors from the
    reference image; with no reference image, fall back to building a
    global colormap for the whole sequence.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      return(QuantizeImages(quantize_info,images));
    }
  /*
    Classify image colors from the reference image.
  */
  cube=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      images->filename);
  ok=ClassifyImageColors(cube,remap_image,&images->exception);
  if (ok != MagickFalse)
    {
      /*
        Assign the reference colors to each image in the sequence.
      */
      cube->quantize_info->number_colors=cube->colors;
      for (image=images; image != (Image *) NULL;
           image=GetNextImageInList(image))
      {
        ok=AssignImageColors(image,cube);
        if (ok == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube);
  return(ok);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image)
%
% A description of each parameter follows:
%
% o image: The image.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() callback: order colormap entries by ascending intensity.
  NOTE(review): the int subtraction could overflow for extreme intensity
  ranges on HDRI builds — behavior preserved from the original; confirm.
*/
static int IntensityCompare(const void *x,const void *y)
{
  PixelPacket
    *pixel_1,
    *pixel_2;

  int
    delta;

  pixel_1=(PixelPacket *) x;
  pixel_2=(PixelPacket *) y;
  delta=PixelPacketIntensity(pixel_1)-(int) PixelPacketIntensity(pixel_2);
  return((int) delta);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  SetGrayscaleImage() converts an image to a compact PseudoClass grayscale
  image: it builds a colormap of the unique intensities, sorts it by
  intensity, deduplicates it, and remaps every pixel index accordingly.
*/
static MagickBooleanType SetGrayscaleImage(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  PixelPacket
    *colormap;

  register ssize_t
    i;

  ssize_t
    *colormap_index,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace);
  /*
    colormap_index maps a ScaleQuantumToMap() intensity to a colormap slot.
  */
  colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        Build the colormap from the pixels; the 0xff fill yields -1
        ("unassigned") sentinels in every slot.
      */
      (void) ResetMagickMemory(colormap_index,(-1),MaxColormapSize*
        sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse)
        {
          /* colormap_index was previously leaked on this error path */
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *magick_restrict indexes;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;

          intensity=ScaleQuantumToMap(GetPixelRed(q));
          if (colormap_index[intensity] < 0)
            {
              /*
                Double-checked update: re-test inside the critical section so
                only one thread allocates a slot for this intensity.
              */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=GetPixelRed(q);
                  image->colormap[image->colors].green=GetPixelGreen(q);
                  image->colormap[image->colors].blue=GetPixelBlue(q);
                  image->colors++;
                }
            }
          SetPixelIndex(indexes+x,colormap_index[intensity]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Stash each entry's original index in its opacity channel so the
    post-sort remapping below can recover it.  NOTE(review): the cast
    assumes image->colors fits in an unsigned short — confirm.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].opacity=(unsigned short) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelPacket),
    IntensityCompare);
  colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
    sizeof(*colormap));
  if (colormap == (PixelPacket *) NULL)
    {
      /* colormap_index was previously leaked on this error path */
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Eliminate duplicate colors from the sorted colormap, recording where
    each original entry landed.
  */
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].opacity]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  /*
    Remap the pixel indexes onto the deduplicated, sorted colormap.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
      SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex(
        indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,&image->exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
|
c-typeck.c | /* Build expressions with type checking for C compiler.
Copyright (C) 1987-2020 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* This file is part of the C front end.
It contains routines to build C expressions given their operands,
including computing the types of the result, C-specific error checks,
and some optimization. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "target.h"
#include "function.h"
#include "bitmap.h"
#include "c-tree.h"
#include "gimple-expr.h"
#include "predict.h"
#include "stor-layout.h"
#include "trans-mem.h"
#include "varasm.h"
#include "stmt.h"
#include "langhooks.h"
#include "c-lang.h"
#include "intl.h"
#include "tree-iterator.h"
#include "gimplify.h"
#include "tree-inline.h"
#include "omp-general.h"
#include "c-family/c-objc.h"
#include "c-family/c-ubsan.h"
#include "gomp-constants.h"
#include "spellcheck-tree.h"
#include "gcc-rich-location.h"
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"
/* Possible cases of implicit bad conversions. Used to select
diagnostic messages in convert_for_assignment. */
enum impl_conv {
  ic_argpass,   /* conversion of an argument in a function call */
  ic_assign,    /* conversion of the right-hand side of an assignment */
  ic_init,      /* conversion of an initializer */
  ic_return     /* conversion of a function's return value */
};
/* The level of nesting inside "__alignof__". */
int in_alignof;
/* The level of nesting inside "sizeof". */
int in_sizeof;
/* The level of nesting inside "typeof". */
int in_typeof;
/* The argument of last parsed sizeof expression, only to be tested
if expr.original_code == SIZEOF_EXPR. */
tree c_last_sizeof_arg;
location_t c_last_sizeof_loc;
/* Nonzero if we might need to print a "missing braces around
initializer" message within this initializer. */
static int found_missing_braces;
static int require_constant_value;
static int require_constant_elements;
static bool null_pointer_constant_p (const_tree);
static tree qualify_type (tree, tree);
static int tagged_types_tu_compatible_p (const_tree, const_tree, bool *,
bool *);
static int comp_target_types (location_t, tree, tree);
static int function_types_compatible_p (const_tree, const_tree, bool *,
bool *);
static int type_lists_compatible_p (const_tree, const_tree, bool *, bool *);
static tree lookup_field (tree, tree);
static int convert_arguments (location_t, vec<location_t>, tree,
vec<tree, va_gc> *, vec<tree, va_gc> *, tree,
tree);
static tree pointer_diff (location_t, tree, tree, tree *);
static tree convert_for_assignment (location_t, location_t, tree, tree, tree,
enum impl_conv, bool, tree, tree, int,
int = 0);
static tree valid_compound_expr_initializer (tree, tree);
static void push_string (const char *);
static void push_member_name (tree);
static int spelling_length (void);
static char *print_spelling (char *);
static void warning_init (location_t, int, const char *);
static tree digest_init (location_t, tree, tree, tree, bool, bool, int);
static void output_init_element (location_t, tree, tree, bool, tree, tree, bool,
bool, struct obstack *);
static void output_pending_init_elements (int, struct obstack *);
static bool set_designator (location_t, bool, struct obstack *);
static void push_range_stack (tree, struct obstack *);
static void add_pending_init (location_t, tree, tree, tree, bool,
struct obstack *);
static void set_nonincremental_init (struct obstack *);
static void set_nonincremental_init_from_string (tree, struct obstack *);
static tree find_init_member (tree, struct obstack *);
static void readonly_warning (tree, enum lvalue_use);
static int lvalue_or_else (location_t, const_tree, enum lvalue_use);
static void record_maybe_used_decl (tree);
static int comptypes_internal (const_tree, const_tree, bool *, bool *);
/* Return true if EXP is a null pointer constant, false otherwise. */
static bool
null_pointer_constant_p (const_tree expr)
{
  /* This should really operate on c_expr structures, but they aren't
     yet available everywhere required.  A null pointer constant is an
     integer constant expression with value 0, or such an expression
     cast to unqualified (void *).  */
  tree type = TREE_TYPE (expr);

  if (TREE_CODE (expr) != INTEGER_CST
      || TREE_OVERFLOW (expr)
      || !integer_zerop (expr))
    return false;

  if (INTEGRAL_TYPE_P (type))
    return true;

  return (TREE_CODE (type) == POINTER_TYPE
	  && VOID_TYPE_P (TREE_TYPE (type))
	  && TYPE_QUALS (TREE_TYPE (type)) == TYPE_UNQUALIFIED);
}
/* EXPR may appear in an unevaluated part of an integer constant
expression, but not in an evaluated part. Wrap it in a
C_MAYBE_CONST_EXPR, or mark it with TREE_OVERFLOW if it is just an
INTEGER_CST and we cannot create a C_MAYBE_CONST_EXPR. */
static tree
note_integer_operands (tree expr)
{
  /* Wrap EXPR so it is known to be valid only in unevaluated parts of
     an integer constant expression.  For a bare INTEGER_CST in a late
     binary operation we cannot build a C_MAYBE_CONST_EXPR, so mark a
     copy with TREE_OVERFLOW instead.  */
  if (TREE_CODE (expr) != INTEGER_CST || !in_late_binary_op)
    {
      tree wrapped
	= build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (expr), NULL_TREE, expr);
      C_MAYBE_CONST_EXPR_INT_OPERANDS (wrapped) = 1;
      return wrapped;
    }

  tree marked = copy_node (expr);
  TREE_OVERFLOW (marked) = 1;
  return marked;
}
/* Having checked whether EXPR may appear in an unevaluated part of an
integer constant expression and found that it may, remove any
C_MAYBE_CONST_EXPR noting this fact and return the resulting
expression. */
static inline tree
remove_c_maybe_const_expr (tree expr)
{
  /* Strip a C_MAYBE_CONST_EXPR wrapper, if present; otherwise return
     EXPR unchanged.  */
  return (TREE_CODE (expr) == C_MAYBE_CONST_EXPR
	  ? C_MAYBE_CONST_EXPR_EXPR (expr)
	  : expr);
}
/* This is a cache to hold if two types are compatible or not.  */
struct tagged_tu_seen_cache {
  /* Link to the previously pushed cache entry (LIFO chain).  */
  const struct tagged_tu_seen_cache * next;
  /* The pair of tagged types whose compatibility was computed.  */
  const_tree t1;
  const_tree t2;
  /* The return value of tagged_types_tu_compatible_p if we had seen
     these two types already.  */
  int val;
};
/* Head of the cache chain; callers save it and later free newer
   entries via free_all_tagged_tu_seen_up_to (see comptypes).  */
static const struct tagged_tu_seen_cache * tagged_tu_seen_base;
static void free_all_tagged_tu_seen_up_to (const struct tagged_tu_seen_cache *);
/* Do `exp = require_complete_type (loc, exp);' to make sure exp
does not have an incomplete type. (That includes void types.)
LOC is the location of the use. */
tree
require_complete_type (location_t loc, tree value)
{
  /* Return VALUE if its type is complete; otherwise diagnose at LOC
     and return error_mark_node.  Erroneous operands pass through
     silently to avoid cascading diagnostics.  */
  tree type = TREE_TYPE (value);

  if (error_operand_p (value))
    return error_mark_node;

  if (!COMPLETE_TYPE_P (type))
    {
      c_incomplete_type_error (loc, value, type);
      return error_mark_node;
    }

  return value;
}
/* Print an error message for invalid use of an incomplete type.
VALUE is the expression that was used (or 0 if that isn't known)
and TYPE is the type that was invalid. LOC is the location for
the error. */
void
c_incomplete_type_error (location_t loc, const_tree value, const_tree type)
{
  /* Avoid duplicate error message.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return;
  if (value != NULL_TREE && (VAR_P (value) || TREE_CODE (value) == PARM_DECL))
    error_at (loc, "%qD has an incomplete type %qT", value, type);
  else
    {
    retry:
      /* We must print an error message.  Be clever about what it says.  */
      switch (TREE_CODE (type))
	{
	case RECORD_TYPE:
	case UNION_TYPE:
	case ENUMERAL_TYPE:
	  /* Tagged types fall through to the undefined/incomplete-typedef
	     diagnostics below.  */
	  break;
	case VOID_TYPE:
	  error_at (loc, "invalid use of void expression");
	  return;
	case ARRAY_TYPE:
	  if (TYPE_DOMAIN (type))
	    {
	      if (TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL)
		{
		  error_at (loc, "invalid use of flexible array member");
		  return;
		}
	      /* The array itself has a domain, so the incompleteness comes
		 from the element type; retry on it (loops for
		 multidimensional arrays).  */
	      type = TREE_TYPE (type);
	      goto retry;
	    }
	  error_at (loc, "invalid use of array with unspecified bounds");
	  return;
	default:
	  gcc_unreachable ();
	}
      if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
	error_at (loc, "invalid use of undefined type %qT", type);
      else
	/* If this type has a typedef-name, the TYPE_NAME is a TYPE_DECL.  */
	error_at (loc, "invalid use of incomplete typedef %qT", type);
    }
}
/* Given a type, apply default promotions wrt unnamed function
arguments and return the new type. */
tree
c_type_promotes_to (tree type)
{
  /* Compute the default-promoted type: float promotes to double, and
     small integer types promote to (unsigned) int.  Other types are
     returned unchanged.  Atomic qualification is preserved.  */
  tree promoted = NULL_TREE;

  if (TYPE_MAIN_VARIANT (type) == float_type_node)
    promoted = double_type_node;
  else if (c_promoting_integer_type_p (type))
    {
      /* Preserve unsignedness if not really getting any wider.  */
      bool keep_unsigned
	= (TYPE_UNSIGNED (type)
	   && TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node));
      promoted = keep_unsigned ? unsigned_type_node : integer_type_node;
    }

  if (promoted == NULL_TREE)
    return type;

  if (TYPE_ATOMIC (type))
    return c_build_qualified_type (promoted, TYPE_QUAL_ATOMIC);
  return promoted;
}
/* Return true if between two named address spaces, whether there is a superset
named address space that encompasses both address spaces. If there is a
superset, return which address space is the superset. */
static bool
addr_space_superset (addr_space_t as1, addr_space_t as2, addr_space_t *common)
{
  /* Identical spaces are trivially their own superset.  */
  if (as1 == as2)
    {
      *common = as1;
      return true;
    }

  /* Otherwise ask the target whether one space contains the other.
     The AS1-in-AS2 query is made first, matching the original
     precedence.  */
  if (targetm.addr_space.subset_p (as1, as2))
    {
      *common = as2;
      return true;
    }

  if (targetm.addr_space.subset_p (as2, as1))
    {
      *common = as1;
      return true;
    }

  return false;
}
/* Return a variant of TYPE which has all the type qualifiers of LIKE
as well as those of TYPE. */
static tree
qualify_type (tree type, tree like)
{
  /* Build a variant of TYPE carrying the qualifiers of both TYPE and
     LIKE (except LIKE's _Atomic), in their common address space.  */
  addr_space_t as_type = TYPE_ADDR_SPACE (type);
  addr_space_t as_like = TYPE_ADDR_SPACE (like);
  addr_space_t as_common;

  /* If the two named address spaces are different, determine the common
     superset address space.  If there isn't one, raise an error.  */
  if (!addr_space_superset (as_type, as_like, &as_common))
    {
      as_common = as_type;
      error ("%qT and %qT are in disjoint named address spaces",
	     type, like);
    }

  int quals = (TYPE_QUALS_NO_ADDR_SPACE (type)
	       | TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (like)
	       | ENCODE_QUAL_ADDR_SPACE (as_common));
  return c_build_qualified_type (type, quals);
}
/* Return true iff the given tree T is a variable length array. */
bool
c_vla_type_p (const_tree t)
{
  /* A variable length array is an ARRAY_TYPE flagged as having a
     variable size.  */
  return TREE_CODE (t) == ARRAY_TYPE && C_TYPE_VARIABLE_SIZE (t);
}
/* If NTYPE is a type of a non-variadic function with a prototype
and OTYPE is a type of a function without a prototype and ATTRS
contains attribute format, diagnosess and removes it from ATTRS.
Returns the result of build_type_attribute_variant of NTYPE and
the (possibly) modified ATTRS. */
static tree
build_functype_attribute_variant (tree ntype, tree otype, tree attrs)
{
  /* When a prototyped NTYPE is merged with an unprototyped OTYPE, a
     "format" attribute no longer makes sense; warn and drop it before
     building the attribute variant.  */
  bool drop_format = (!prototype_p (otype)
		      && prototype_p (ntype)
		      && lookup_attribute ("format", attrs) != NULL_TREE);

  if (drop_format)
    {
      warning_at (input_location, OPT_Wattributes,
		  "%qs attribute cannot be applied to a function that "
		  "does not take variable arguments", "format");
      attrs = remove_attribute ("format", attrs);
    }

  return build_type_attribute_variant (ntype, attrs);
}
/* Return the composite type of two compatible types.
We assume that comptypes has already been done and returned
nonzero; if that isn't so, this may crash. In particular, we
assume that qualifiers match. */
tree
composite_type (tree t1, tree t2)
{
  enum tree_code code1;
  enum tree_code code2;
  tree attributes;
  /* Save time if the two types are the same.  */
  if (t1 == t2) return t1;
  /* If one type is nonsense, use the other.  */
  if (t1 == error_mark_node)
    return t2;
  if (t2 == error_mark_node)
    return t1;
  code1 = TREE_CODE (t1);
  code2 = TREE_CODE (t2);
  /* Merge the attributes.  */
  attributes = targetm.merge_type_attributes (t1, t2);
  /* If one is an enumerated type and the other is the compatible
     integer type, the composite type might be either of the two
     (DR#013 question 3).  For consistency, use the enumerated type as
     the composite type.  */
  if (code1 == ENUMERAL_TYPE && code2 == INTEGER_TYPE)
    return t1;
  if (code2 == ENUMERAL_TYPE && code1 == INTEGER_TYPE)
    return t2;
  gcc_assert (code1 == code2);
  switch (code1)
    {
    case POINTER_TYPE:
      /* For two pointers, do this recursively on the target type.  */
      {
	tree pointed_to_1 = TREE_TYPE (t1);
	tree pointed_to_2 = TREE_TYPE (t2);
	tree target = composite_type (pointed_to_1, pointed_to_2);
	t1 = build_pointer_type_for_mode (target, TYPE_MODE (t1), false);
	t1 = build_type_attribute_variant (t1, attributes);
	return qualify_type (t1, t2);
      }
    case ARRAY_TYPE:
      {
	tree elt = composite_type (TREE_TYPE (t1), TREE_TYPE (t2));
	int quals;
	tree unqual_elt;
	tree d1 = TYPE_DOMAIN (t1);
	tree d2 = TYPE_DOMAIN (t2);
	bool d1_variable, d2_variable;
	bool d1_zero, d2_zero;
	bool t1_complete, t2_complete;
	/* We should not have any type quals on arrays at all.  */
	gcc_assert (!TYPE_QUALS_NO_ADDR_SPACE (t1)
		    && !TYPE_QUALS_NO_ADDR_SPACE (t2));
	t1_complete = COMPLETE_TYPE_P (t1);
	t2_complete = COMPLETE_TYPE_P (t2);
	/* Classify each domain: missing/zero-length, or variable
	   (VLA) with non-constant bounds.  */
	d1_zero = d1 == NULL_TREE || !TYPE_MAX_VALUE (d1);
	d2_zero = d2 == NULL_TREE || !TYPE_MAX_VALUE (d2);
	d1_variable = (!d1_zero
		       && (TREE_CODE (TYPE_MIN_VALUE (d1)) != INTEGER_CST
			   || TREE_CODE (TYPE_MAX_VALUE (d1)) != INTEGER_CST));
	d2_variable = (!d2_zero
		       && (TREE_CODE (TYPE_MIN_VALUE (d2)) != INTEGER_CST
			   || TREE_CODE (TYPE_MAX_VALUE (d2)) != INTEGER_CST));
	d1_variable = d1_variable || (d1_zero && c_vla_type_p (t1));
	d2_variable = d2_variable || (d2_zero && c_vla_type_p (t2));
	/* Save space: see if the result is identical to one of the args.  */
	if (elt == TREE_TYPE (t1) && TYPE_DOMAIN (t1)
	    && (d2_variable || d2_zero || !d1_variable))
	  return build_type_attribute_variant (t1, attributes);
	if (elt == TREE_TYPE (t2) && TYPE_DOMAIN (t2)
	    && (d1_variable || d1_zero || !d2_variable))
	  return build_type_attribute_variant (t2, attributes);
	if (elt == TREE_TYPE (t1) && !TYPE_DOMAIN (t2) && !TYPE_DOMAIN (t1))
	  return build_type_attribute_variant (t1, attributes);
	if (elt == TREE_TYPE (t2) && !TYPE_DOMAIN (t2) && !TYPE_DOMAIN (t1))
	  return build_type_attribute_variant (t2, attributes);
	/* Merge the element types, and have a size if either arg has
	   one.  We may have qualifiers on the element types.  To set
	   up TYPE_MAIN_VARIANT correctly, we need to form the
	   composite of the unqualified types and add the qualifiers
	   back at the end.  */
	quals = TYPE_QUALS (strip_array_types (elt));
	unqual_elt = c_build_qualified_type (elt, TYPE_UNQUALIFIED);
	t1 = build_array_type (unqual_elt,
			       TYPE_DOMAIN ((TYPE_DOMAIN (t1)
					     && (d2_variable
						 || d2_zero
						 || !d1_variable))
					    ? t1
					    : t2));
	/* Ensure a composite type involving a zero-length array type
	   is a zero-length type not an incomplete type.  */
	if (d1_zero && d2_zero
	    && (t1_complete || t2_complete)
	    && !COMPLETE_TYPE_P (t1))
	  {
	    TYPE_SIZE (t1) = bitsize_zero_node;
	    TYPE_SIZE_UNIT (t1) = size_zero_node;
	  }
	t1 = c_build_qualified_type (t1, quals);
	return build_type_attribute_variant (t1, attributes);
      }
    case ENUMERAL_TYPE:
    case RECORD_TYPE:
    case UNION_TYPE:
      if (attributes != NULL)
	{
	  /* Try harder not to create a new aggregate type.  */
	  if (attribute_list_equal (TYPE_ATTRIBUTES (t1), attributes))
	    return t1;
	  if (attribute_list_equal (TYPE_ATTRIBUTES (t2), attributes))
	    return t2;
	}
      return build_type_attribute_variant (t1, attributes);
    case FUNCTION_TYPE:
      /* Function types: prefer the one that specified arg types.
	 If both do, merge the arg types.  Also merge the return types.  */
      {
	tree valtype = composite_type (TREE_TYPE (t1), TREE_TYPE (t2));
	tree p1 = TYPE_ARG_TYPES (t1);
	tree p2 = TYPE_ARG_TYPES (t2);
	int len;
	tree newargs, n;
	int i;
	/* Save space: see if the result is identical to one of the args.  */
	if (valtype == TREE_TYPE (t1) && !TYPE_ARG_TYPES (t2))
	  return build_functype_attribute_variant (t1, t2, attributes);
	if (valtype == TREE_TYPE (t2) && !TYPE_ARG_TYPES (t1))
	  return build_functype_attribute_variant (t2, t1, attributes);
	/* Simple way if one arg fails to specify argument types.  */
	if (TYPE_ARG_TYPES (t1) == NULL_TREE)
	  {
	    t1 = build_function_type (valtype, TYPE_ARG_TYPES (t2));
	    t1 = build_type_attribute_variant (t1, attributes);
	    return qualify_type (t1, t2);
	  }
	if (TYPE_ARG_TYPES (t2) == NULL_TREE)
	  {
	    t1 = build_function_type (valtype, TYPE_ARG_TYPES (t1));
	    t1 = build_type_attribute_variant (t1, attributes);
	    return qualify_type (t1, t2);
	  }
	/* If both args specify argument types, we must merge the two
	   lists, argument by argument.  */
	/* Count the parameters of P1, then build a fresh list of that
	   length whose tail is the terminating void_list_node (or
	   NULL).  */
	for (len = 0, newargs = p1;
	     newargs && newargs != void_list_node;
	     len++, newargs = TREE_CHAIN (newargs))
	  ;
	for (i = 0; i < len; i++)
	  newargs = tree_cons (NULL_TREE, NULL_TREE, newargs);
	n = newargs;
	for (; p1 && p1 != void_list_node;
	     p1 = TREE_CHAIN (p1), p2 = TREE_CHAIN (p2), n = TREE_CHAIN (n))
	  {
	    /* A null type means arg type is not specified.
	       Take whatever the other function type has.  */
	    if (TREE_VALUE (p1) == NULL_TREE)
	      {
		TREE_VALUE (n) = TREE_VALUE (p2);
		goto parm_done;
	      }
	    if (TREE_VALUE (p2) == NULL_TREE)
	      {
		TREE_VALUE (n) = TREE_VALUE (p1);
		goto parm_done;
	      }
	    /* Given  wait (union {union wait *u; int *i} *)
	       and  wait (union wait *),
	       prefer  union wait *  as type of parm.  */
	    if (TREE_CODE (TREE_VALUE (p1)) == UNION_TYPE
		&& TREE_VALUE (p1) != TREE_VALUE (p2))
	      {
		tree memb;
		tree mv2 = TREE_VALUE (p2);
		if (mv2 && mv2 != error_mark_node
		    && TREE_CODE (mv2) != ARRAY_TYPE)
		  mv2 = TYPE_MAIN_VARIANT (mv2);
		/* Search the union's members for one compatible with
		   the other parameter's type.  */
		for (memb = TYPE_FIELDS (TREE_VALUE (p1));
		     memb; memb = DECL_CHAIN (memb))
		  {
		    tree mv3 = TREE_TYPE (memb);
		    if (mv3 && mv3 != error_mark_node
			&& TREE_CODE (mv3) != ARRAY_TYPE)
		      mv3 = TYPE_MAIN_VARIANT (mv3);
		    if (comptypes (mv3, mv2))
		      {
			TREE_VALUE (n) = composite_type (TREE_TYPE (memb),
							 TREE_VALUE (p2));
			pedwarn (input_location, OPT_Wpedantic,
				 "function types not truly compatible in ISO C");
			goto parm_done;
		      }
		  }
	      }
	    /* Mirror of the previous case with the union on the P2
	       side.  */
	    if (TREE_CODE (TREE_VALUE (p2)) == UNION_TYPE
		&& TREE_VALUE (p2) != TREE_VALUE (p1))
	      {
		tree memb;
		tree mv1 = TREE_VALUE (p1);
		if (mv1 && mv1 != error_mark_node
		    && TREE_CODE (mv1) != ARRAY_TYPE)
		  mv1 = TYPE_MAIN_VARIANT (mv1);
		for (memb = TYPE_FIELDS (TREE_VALUE (p2));
		     memb; memb = DECL_CHAIN (memb))
		  {
		    tree mv3 = TREE_TYPE (memb);
		    if (mv3 && mv3 != error_mark_node
			&& TREE_CODE (mv3) != ARRAY_TYPE)
		      mv3 = TYPE_MAIN_VARIANT (mv3);
		    if (comptypes (mv3, mv1))
		      {
			TREE_VALUE (n) = composite_type (TREE_TYPE (memb),
							 TREE_VALUE (p1));
			pedwarn (input_location, OPT_Wpedantic,
				 "function types not truly compatible in ISO C");
			goto parm_done;
		      }
		  }
	      }
	    TREE_VALUE (n) = composite_type (TREE_VALUE (p1), TREE_VALUE (p2));
	  parm_done: ;
	  }
	t1 = build_function_type (valtype, newargs);
	t1 = qualify_type (t1, t2);
      }
      /* FALLTHRU */
    default:
      return build_type_attribute_variant (t1, attributes);
    }
}
/* Return the type of a conditional expression between pointers to
possibly differently qualified versions of compatible types.
We assume that comp_target_types has already been done and returned
nonzero; if that isn't so, this may crash. */
static tree
common_pointer_type (tree t1, tree t2)
{
  tree attributes;
  tree pointed_to_1, mv1;
  tree pointed_to_2, mv2;
  tree target;
  unsigned target_quals;
  addr_space_t as1, as2, as_common;
  int quals1, quals2;
  /* Save time if the two types are the same.  */
  if (t1 == t2) return t1;
  /* If one type is nonsense, use the other.  */
  if (t1 == error_mark_node)
    return t2;
  if (t2 == error_mark_node)
    return t1;
  gcc_assert (TREE_CODE (t1) == POINTER_TYPE
	      && TREE_CODE (t2) == POINTER_TYPE);
  /* Merge the attributes.  */
  attributes = targetm.merge_type_attributes (t1, t2);
  /* Find the composite type of the target types, and combine the
     qualifiers of the two types' targets.  Do not lose qualifiers on
     array element types by taking the TYPE_MAIN_VARIANT.  */
  mv1 = pointed_to_1 = TREE_TYPE (t1);
  mv2 = pointed_to_2 = TREE_TYPE (t2);
  if (TREE_CODE (mv1) != ARRAY_TYPE)
    mv1 = TYPE_MAIN_VARIANT (pointed_to_1);
  if (TREE_CODE (mv2) != ARRAY_TYPE)
    mv2 = TYPE_MAIN_VARIANT (pointed_to_2);
  target = composite_type (mv1, mv2);
  /* Strip array types to get correct qualifier for pointers to arrays */
  quals1 = TYPE_QUALS_NO_ADDR_SPACE (strip_array_types (pointed_to_1));
  quals2 = TYPE_QUALS_NO_ADDR_SPACE (strip_array_types (pointed_to_2));
  /* For function types do not merge const qualifiers, but drop them
     if used inconsistently.  The middle-end uses these to mark const
     and noreturn functions.  */
  if (TREE_CODE (pointed_to_1) == FUNCTION_TYPE)
    target_quals = (quals1 & quals2);
  else
    target_quals = (quals1 | quals2);
  /* If the two named address spaces are different, determine the common
     superset address space.  This is guaranteed to exist due to the
     assumption that comp_target_type returned non-zero.  */
  as1 = TYPE_ADDR_SPACE (pointed_to_1);
  as2 = TYPE_ADDR_SPACE (pointed_to_2);
  if (!addr_space_superset (as1, as2, &as_common))
    gcc_unreachable ();
  target_quals |= ENCODE_QUAL_ADDR_SPACE (as_common);
  /* Rebuild the pointer type with the merged target qualifiers and
     the merged attributes.  */
  t1 = build_pointer_type (c_build_qualified_type (target, target_quals));
  return build_type_attribute_variant (t1, attributes);
}
/* Return the common type for two arithmetic types under the usual
arithmetic conversions. The default conversions have already been
applied, and enumerated types converted to their compatible integer
types. The resulting type is unqualified and has no attributes.
This is the type for the result of most arithmetic operations
if the operands have the given two types. */
static tree
c_common_type (tree t1, tree t2)
{
  enum tree_code code1;
  enum tree_code code2;
  /* If one type is nonsense, use the other.  */
  if (t1 == error_mark_node)
    return t2;
  if (t2 == error_mark_node)
    return t1;
  /* The result is unqualified and attribute-free; strip both up
     front.  */
  if (TYPE_QUALS (t1) != TYPE_UNQUALIFIED)
    t1 = TYPE_MAIN_VARIANT (t1);
  if (TYPE_QUALS (t2) != TYPE_UNQUALIFIED)
    t2 = TYPE_MAIN_VARIANT (t2);
  if (TYPE_ATTRIBUTES (t1) != NULL_TREE)
    t1 = build_type_attribute_variant (t1, NULL_TREE);
  if (TYPE_ATTRIBUTES (t2) != NULL_TREE)
    t2 = build_type_attribute_variant (t2, NULL_TREE);
  /* Save time if the two types are the same.  */
  if (t1 == t2) return t1;
  code1 = TREE_CODE (t1);
  code2 = TREE_CODE (t2);
  gcc_assert (code1 == VECTOR_TYPE || code1 == COMPLEX_TYPE
	      || code1 == FIXED_POINT_TYPE || code1 == REAL_TYPE
	      || code1 == INTEGER_TYPE);
  gcc_assert (code2 == VECTOR_TYPE || code2 == COMPLEX_TYPE
	      || code2 == FIXED_POINT_TYPE || code2 == REAL_TYPE
	      || code2 == INTEGER_TYPE);
  /* When one operand is a decimal float type, the other operand cannot be
     a generic float type or a complex type.  We also disallow vector types
     here.  */
  if ((DECIMAL_FLOAT_TYPE_P (t1) || DECIMAL_FLOAT_TYPE_P (t2))
      && !(DECIMAL_FLOAT_TYPE_P (t1) && DECIMAL_FLOAT_TYPE_P (t2)))
    {
      if (code1 == VECTOR_TYPE || code2 == VECTOR_TYPE)
	{
	  error ("cannot mix operands of decimal floating and vector types");
	  return error_mark_node;
	}
      if (code1 == COMPLEX_TYPE || code2 == COMPLEX_TYPE)
	{
	  error ("cannot mix operands of decimal floating and complex types");
	  return error_mark_node;
	}
      if (code1 == REAL_TYPE && code2 == REAL_TYPE)
	{
	  error ("cannot mix operands of decimal floating "
		 "and other floating types");
	  return error_mark_node;
	}
    }
  /* If one type is a vector type, return that type.  (How the usual
     arithmetic conversions apply to the vector types extension is not
     precisely specified.)  */
  if (code1 == VECTOR_TYPE)
    return t1;
  if (code2 == VECTOR_TYPE)
    return t2;
  /* If one type is complex, form the common type of the non-complex
     components, then make that complex.  Use T1 or T2 if it is the
     required type.  */
  if (code1 == COMPLEX_TYPE || code2 == COMPLEX_TYPE)
    {
      tree subtype1 = code1 == COMPLEX_TYPE ? TREE_TYPE (t1) : t1;
      tree subtype2 = code2 == COMPLEX_TYPE ? TREE_TYPE (t2) : t2;
      tree subtype = c_common_type (subtype1, subtype2);
      if (code1 == COMPLEX_TYPE && TREE_TYPE (t1) == subtype)
	return t1;
      else if (code2 == COMPLEX_TYPE && TREE_TYPE (t2) == subtype)
	return t2;
      else
	return build_complex_type (subtype);
    }
  /* If only one is real, use it as the result.  */
  if (code1 == REAL_TYPE && code2 != REAL_TYPE)
    return t1;
  if (code2 == REAL_TYPE && code1 != REAL_TYPE)
    return t2;
  /* If both are real and either are decimal floating point types, use
     the decimal floating point type with the greater precision.  */
  if (code1 == REAL_TYPE && code2 == REAL_TYPE)
    {
      if (TYPE_MAIN_VARIANT (t1) == dfloat128_type_node
	  || TYPE_MAIN_VARIANT (t2) == dfloat128_type_node)
	return dfloat128_type_node;
      else if (TYPE_MAIN_VARIANT (t1) == dfloat64_type_node
	       || TYPE_MAIN_VARIANT (t2) == dfloat64_type_node)
	return dfloat64_type_node;
      else if (TYPE_MAIN_VARIANT (t1) == dfloat32_type_node
	       || TYPE_MAIN_VARIANT (t2) == dfloat32_type_node)
	return dfloat32_type_node;
    }
  /* Deal with fixed-point types.  */
  if (code1 == FIXED_POINT_TYPE || code2 == FIXED_POINT_TYPE)
    {
      unsigned int unsignedp = 0, satp = 0;
      scalar_mode m1, m2;
      unsigned int fbit1, ibit1, fbit2, ibit2, max_fbit, max_ibit;
      m1 = SCALAR_TYPE_MODE (t1);
      m2 = SCALAR_TYPE_MODE (t2);
      /* If one input type is saturating, the result type is saturating.  */
      if (TYPE_SATURATING (t1) || TYPE_SATURATING (t2))
	satp = 1;
      /* If both fixed-point types are unsigned, the result type is unsigned.
	 When mixing fixed-point and integer types, follow the sign of the
	 fixed-point type.
	 Otherwise, the result type is signed.  */
      if ((TYPE_UNSIGNED (t1) && TYPE_UNSIGNED (t2)
	   && code1 == FIXED_POINT_TYPE && code2 == FIXED_POINT_TYPE)
	  || (code1 == FIXED_POINT_TYPE && code2 != FIXED_POINT_TYPE
	      && TYPE_UNSIGNED (t1))
	  || (code1 != FIXED_POINT_TYPE && code2 == FIXED_POINT_TYPE
	      && TYPE_UNSIGNED (t2)))
	unsignedp = 1;
      /* The result type is signed.  */
      if (unsignedp == 0)
	{
	  /* If the input type is unsigned, we need to convert to the
	     signed type.  */
	  if (code1 == FIXED_POINT_TYPE && TYPE_UNSIGNED (t1))
	    {
	      enum mode_class mclass = (enum mode_class) 0;
	      if (GET_MODE_CLASS (m1) == MODE_UFRACT)
		mclass = MODE_FRACT;
	      else if (GET_MODE_CLASS (m1) == MODE_UACCUM)
		mclass = MODE_ACCUM;
	      else
		gcc_unreachable ();
	      m1 = as_a <scalar_mode>
		(mode_for_size (GET_MODE_PRECISION (m1), mclass, 0));
	    }
	  if (code2 == FIXED_POINT_TYPE && TYPE_UNSIGNED (t2))
	    {
	      enum mode_class mclass = (enum mode_class) 0;
	      if (GET_MODE_CLASS (m2) == MODE_UFRACT)
		mclass = MODE_FRACT;
	      else if (GET_MODE_CLASS (m2) == MODE_UACCUM)
		mclass = MODE_ACCUM;
	      else
		gcc_unreachable ();
	      m2 = as_a <scalar_mode>
		(mode_for_size (GET_MODE_PRECISION (m2), mclass, 0));
	    }
	}
      /* Collect integral and fractional bit counts from each operand;
	 plain integers contribute only integral bits.  */
      if (code1 == FIXED_POINT_TYPE)
	{
	  fbit1 = GET_MODE_FBIT (m1);
	  ibit1 = GET_MODE_IBIT (m1);
	}
      else
	{
	  fbit1 = 0;
	  /* Signed integers need to subtract one sign bit.  */
	  ibit1 = TYPE_PRECISION (t1) - (!TYPE_UNSIGNED (t1));
	}
      if (code2 == FIXED_POINT_TYPE)
	{
	  fbit2 = GET_MODE_FBIT (m2);
	  ibit2 = GET_MODE_IBIT (m2);
	}
      else
	{
	  fbit2 = 0;
	  /* Signed integers need to subtract one sign bit.  */
	  ibit2 = TYPE_PRECISION (t2) - (!TYPE_UNSIGNED (t2));
	}
      max_ibit = ibit1 >= ibit2 ?  ibit1 : ibit2;
      max_fbit = fbit1 >= fbit2 ?  fbit1 : fbit2;
      return c_common_fixed_point_type_for_size (max_ibit, max_fbit, unsignedp,
						 satp);
    }
  /* Both real or both integers; use the one with greater precision.  */
  if (TYPE_PRECISION (t1) > TYPE_PRECISION (t2))
    return t1;
  else if (TYPE_PRECISION (t2) > TYPE_PRECISION (t1))
    return t2;
  /* Same precision.  Prefer long longs to longs to ints when the
     same precision, following the C99 rules on integer type rank
     (which are equivalent to the C90 rules for C90 types).  */
  if (TYPE_MAIN_VARIANT (t1) == long_long_unsigned_type_node
      || TYPE_MAIN_VARIANT (t2) == long_long_unsigned_type_node)
    return long_long_unsigned_type_node;
  if (TYPE_MAIN_VARIANT (t1) == long_long_integer_type_node
      || TYPE_MAIN_VARIANT (t2) == long_long_integer_type_node)
    {
      if (TYPE_UNSIGNED (t1) || TYPE_UNSIGNED (t2))
	return long_long_unsigned_type_node;
      else
	return long_long_integer_type_node;
    }
  if (TYPE_MAIN_VARIANT (t1) == long_unsigned_type_node
      || TYPE_MAIN_VARIANT (t2) == long_unsigned_type_node)
    return long_unsigned_type_node;
  if (TYPE_MAIN_VARIANT (t1) == long_integer_type_node
      || TYPE_MAIN_VARIANT (t2) == long_integer_type_node)
    {
      /* But preserve unsignedness from the other type,
	 since long cannot hold all the values of an unsigned int.  */
      if (TYPE_UNSIGNED (t1) || TYPE_UNSIGNED (t2))
	return long_unsigned_type_node;
      else
	return long_integer_type_node;
    }
  /* For floating types of the same TYPE_PRECISION (which we here
     assume means either the same set of values, or sets of values
     neither a subset of the other, with behavior being undefined in
     the latter case), follow the rules from TS 18661-3: prefer
     interchange types _FloatN, then standard types long double,
     double, float, then extended types _FloatNx.  For extended types,
     check them starting with _Float128x as that seems most consistent
     in spirit with preferring long double to double; for interchange
     types, also check in that order for consistency although it's not
     possible for more than one of them to have the same
     precision.  */
  tree mv1 = TYPE_MAIN_VARIANT (t1);
  tree mv2 = TYPE_MAIN_VARIANT (t2);
  for (int i = NUM_FLOATN_TYPES - 1; i >= 0; i--)
    if (mv1 == FLOATN_TYPE_NODE (i) || mv2 == FLOATN_TYPE_NODE (i))
      return FLOATN_TYPE_NODE (i);
  /* Likewise, prefer long double to double even if same size.  */
  if (mv1 == long_double_type_node || mv2 == long_double_type_node)
    return long_double_type_node;
  /* Likewise, prefer double to float even if same size.
     We got a couple of embedded targets with 32 bit doubles, and the
     pdp11 might have 64 bit floats.  */
  if (mv1 == double_type_node || mv2 == double_type_node)
    return double_type_node;
  if (mv1 == float_type_node || mv2 == float_type_node)
    return float_type_node;
  for (int i = NUM_FLOATNX_TYPES - 1; i >= 0; i--)
    if (mv1 == FLOATNX_TYPE_NODE (i) || mv2 == FLOATNX_TYPE_NODE (i))
      return FLOATNX_TYPE_NODE (i);
  /* Otherwise prefer the unsigned one.  */
  if (TYPE_UNSIGNED (t1))
    return t1;
  else
    return t2;
}
/* Wrapper around c_common_type that is used by c-common.c and other
front end optimizations that remove promotions. ENUMERAL_TYPEs
are allowed here and are converted to their compatible integer types.
BOOLEAN_TYPEs are allowed here and return either boolean_type_node or
preferably a non-Boolean type as the common type. */
tree
common_type (tree t1, tree t2)
{
  /* Like c_common_type, but also accepts ENUMERAL_TYPE (converted to
     its compatible integer type) and BOOLEAN_TYPE (preferring the
     non-Boolean operand's type when only one side is Boolean).  */
  if (TREE_CODE (t1) == ENUMERAL_TYPE)
    t1 = c_common_type_for_size (TYPE_PRECISION (t1), 1);
  if (TREE_CODE (t2) == ENUMERAL_TYPE)
    t2 = c_common_type_for_size (TYPE_PRECISION (t2), 1);

  bool t1_boolean = TREE_CODE (t1) == BOOLEAN_TYPE;
  bool t2_boolean = TREE_CODE (t2) == BOOLEAN_TYPE;

  if (t1_boolean && t2_boolean)
    return boolean_type_node;
  if (t1_boolean)
    return t2;
  if (t2_boolean)
    return t1;

  return c_common_type (t1, t2);
}
/* Return 1 if TYPE1 and TYPE2 are compatible types for assignment
or various other operations. Return 2 if they are compatible
but a warning may be needed if you use them together. */
int
comptypes (tree type1, tree type2)
{
  /* Record the current top of the tagged-type cache so that entries
     created during this query can be released afterwards.  */
  const struct tagged_tu_seen_cache *saved_base = tagged_tu_seen_base;

  int result = comptypes_internal (type1, type2, NULL, NULL);
  free_all_tagged_tu_seen_up_to (saved_base);

  return result;
}
/* Like comptypes, but if it returns non-zero because enum and int are
compatible, it sets *ENUM_AND_INT_P to true. */
static int
comptypes_check_enum_int (tree type1, tree type2, bool *enum_and_int_p)
{
  /* As comptypes, but also report enum/int compatibility through
     ENUM_AND_INT_P.  */
  const struct tagged_tu_seen_cache *saved_base = tagged_tu_seen_base;

  int result = comptypes_internal (type1, type2, enum_and_int_p, NULL);
  free_all_tagged_tu_seen_up_to (saved_base);

  return result;
}
/* Like comptypes, but if it returns nonzero for different types, it
sets *DIFFERENT_TYPES_P to true. */
int
comptypes_check_different_types (tree type1, tree type2,
				 bool *different_types_p)
{
  /* As comptypes, but also report through DIFFERENT_TYPES_P when the
     types are compatible yet distinct.  */
  const struct tagged_tu_seen_cache *saved_base = tagged_tu_seen_base;

  int result = comptypes_internal (type1, type2, NULL, different_types_p);
  free_all_tagged_tu_seen_up_to (saved_base);

  return result;
}
/* Return 1 if TYPE1 and TYPE2 are compatible types for assignment
or various other operations. Return 2 if they are compatible
but a warning may be needed if you use them together. If
ENUM_AND_INT_P is not NULL, and one type is an enum and the other a
compatible integer type, then this sets *ENUM_AND_INT_P to true;
*ENUM_AND_INT_P is never set to false. If DIFFERENT_TYPES_P is not
NULL, and the types are compatible but different enough not to be
permitted in C11 typedef redeclarations, then this sets
*DIFFERENT_TYPES_P to true; *DIFFERENT_TYPES_P is never set to
false, but may or may not be set if the types are incompatible.
This differs from comptypes, in that we don't free the seen
types. */
static int
comptypes_internal (const_tree type1, const_tree type2, bool *enum_and_int_p,
		    bool *different_types_p)
{
  const_tree t1 = type1;
  const_tree t2 = type2;
  int attrval, val;
  /* Suppress errors caused by previously reported errors.  */
  if (t1 == t2 || !t1 || !t2
      || TREE_CODE (t1) == ERROR_MARK || TREE_CODE (t2) == ERROR_MARK)
    return 1;
  /* Enumerated types are compatible with integer types, but this is
     not transitive: two enumerated types in the same translation unit
     are compatible with each other only if they are the same type.  */
  if (TREE_CODE (t1) == ENUMERAL_TYPE && TREE_CODE (t2) != ENUMERAL_TYPE)
    {
      t1 = c_common_type_for_size (TYPE_PRECISION (t1), TYPE_UNSIGNED (t1));
      if (TREE_CODE (t2) != VOID_TYPE)
	{
	  if (enum_and_int_p != NULL)
	    *enum_and_int_p = true;
	  if (different_types_p != NULL)
	    *different_types_p = true;
	}
    }
  else if (TREE_CODE (t2) == ENUMERAL_TYPE && TREE_CODE (t1) != ENUMERAL_TYPE)
    {
      t2 = c_common_type_for_size (TYPE_PRECISION (t2), TYPE_UNSIGNED (t2));
      if (TREE_CODE (t1) != VOID_TYPE)
	{
	  if (enum_and_int_p != NULL)
	    *enum_and_int_p = true;
	  if (different_types_p != NULL)
	    *different_types_p = true;
	}
    }
  if (t1 == t2)
    return 1;
  /* Different classes of types can't be compatible.  */
  if (TREE_CODE (t1) != TREE_CODE (t2))
    return 0;
  /* Qualifiers must match. C99 6.7.3p9 */
  if (TYPE_QUALS (t1) != TYPE_QUALS (t2))
    return 0;
  /* Allow for two different type nodes which have essentially the same
     definition.  Note that we already checked for equality of the type
     qualifiers (just above).  */
  if (TREE_CODE (t1) != ARRAY_TYPE
      && TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
    return 1;
  /* 1 if no need for warning yet, 2 if warning cause has been seen.  */
  if (!(attrval = comp_type_attributes (t1, t2)))
    return 0;
  /* VAL tracks compatibility of the type contents, checked below
     per tree code; 0 = incompatible until proven otherwise.  */
  val = 0;
  switch (TREE_CODE (t1))
    {
    case INTEGER_TYPE:
    case FIXED_POINT_TYPE:
    case REAL_TYPE:
      /* With these nodes, we can't determine type equivalence by
	 looking at what is stored in the nodes themselves, because
	 two nodes might have different TYPE_MAIN_VARIANTs but still
	 represent the same type.  For example, wchar_t and int could
	 have the same properties (TYPE_PRECISION, TYPE_MIN_VALUE,
	 TYPE_MAX_VALUE, etc.), but have different TYPE_MAIN_VARIANTs
	 and are distinct types.  On the other hand, int and the
	 following typedef
	   typedef int INT __attribute((may_alias));
	 have identical properties, different TYPE_MAIN_VARIANTs, but
	 represent the same type.  The canonical type system keeps
	 track of equivalence in this case, so we fall back on it.  */
      return TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2);
    case POINTER_TYPE:
      /* Do not remove mode information.  */
      if (TYPE_MODE (t1) != TYPE_MODE (t2))
	break;
      val = (TREE_TYPE (t1) == TREE_TYPE (t2)
	     ? 1 : comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
				       enum_and_int_p, different_types_p));
      break;
    case FUNCTION_TYPE:
      val = function_types_compatible_p (t1, t2, enum_and_int_p,
					 different_types_p);
      break;
    case ARRAY_TYPE:
      {
	tree d1 = TYPE_DOMAIN (t1);
	tree d2 = TYPE_DOMAIN (t2);
	bool d1_variable, d2_variable;
	bool d1_zero, d2_zero;
	val = 1;
	/* Target types must match incl. qualifiers.  */
	if (TREE_TYPE (t1) != TREE_TYPE (t2)
	    && (val = comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
					  enum_and_int_p,
					  different_types_p)) == 0)
	  return 0;
	if (different_types_p != NULL
	    && (d1 == NULL_TREE) != (d2 == NULL_TREE))
	  *different_types_p = true;
	/* Sizes must match unless one is missing or variable.  */
	if (d1 == NULL_TREE || d2 == NULL_TREE || d1 == d2)
	  break;
	d1_zero = !TYPE_MAX_VALUE (d1);
	d2_zero = !TYPE_MAX_VALUE (d2);
	d1_variable = (!d1_zero
		       && (TREE_CODE (TYPE_MIN_VALUE (d1)) != INTEGER_CST
			   || TREE_CODE (TYPE_MAX_VALUE (d1)) != INTEGER_CST));
	d2_variable = (!d2_zero
		       && (TREE_CODE (TYPE_MIN_VALUE (d2)) != INTEGER_CST
			   || TREE_CODE (TYPE_MAX_VALUE (d2)) != INTEGER_CST));
	d1_variable = d1_variable || (d1_zero && c_vla_type_p (t1));
	d2_variable = d2_variable || (d2_zero && c_vla_type_p (t2));
	if (different_types_p != NULL
	    && d1_variable != d2_variable)
	  *different_types_p = true;
	if (d1_variable || d2_variable)
	  break;
	if (d1_zero && d2_zero)
	  break;
	if (d1_zero || d2_zero
	    || !tree_int_cst_equal (TYPE_MIN_VALUE (d1), TYPE_MIN_VALUE (d2))
	    || !tree_int_cst_equal (TYPE_MAX_VALUE (d1), TYPE_MAX_VALUE (d2)))
	  val = 0;
	break;
      }
    case ENUMERAL_TYPE:
    case RECORD_TYPE:
    case UNION_TYPE:
      /* Cross-translation-unit tagged types are compared structurally
	 (and cached) by tagged_types_tu_compatible_p.  */
      if (val != 1 && !same_translation_unit_p (t1, t2))
	{
	  tree a1 = TYPE_ATTRIBUTES (t1);
	  tree a2 = TYPE_ATTRIBUTES (t2);
	  if (! attribute_list_contained (a1, a2)
	      && ! attribute_list_contained (a2, a1))
	    break;
	  if (attrval != 2)
	    return tagged_types_tu_compatible_p (t1, t2, enum_and_int_p,
						 different_types_p);
	  val = tagged_types_tu_compatible_p (t1, t2, enum_and_int_p,
					      different_types_p);
	}
      break;
    case VECTOR_TYPE:
      val = (known_eq (TYPE_VECTOR_SUBPARTS (t1), TYPE_VECTOR_SUBPARTS (t2))
	     && comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
				    enum_and_int_p, different_types_p));
      break;
    default:
      break;
    }
  /* Return 2 (compatible but warn) when an attribute mismatch was
     noted and the contents are otherwise fully compatible.  */
  return attrval == 2 && val == 1 ? 2 : val;
}
/* Return 1 if TTL and TTR are pointers to types that are equivalent, ignoring
   their qualifiers, except for named address spaces.  If the pointers point to
   different named addresses, then we must determine if one address space is a
   subset of the other.  The return value is that of comptypes-style checks:
   0 (incompatible), 1 (compatible) or 2 (compatible with a warning).  */

static int
comp_target_types (location_t location, tree ttl, tree ttr)
{
  int val;
  int val_ped;
  tree mvl = TREE_TYPE (ttl);
  tree mvr = TREE_TYPE (ttr);
  addr_space_t asl = TYPE_ADDR_SPACE (mvl);
  addr_space_t asr = TYPE_ADDR_SPACE (mvr);
  addr_space_t as_common;
  bool enum_and_int_p;

  /* Fail if pointers point to incompatible address spaces.  */
  if (!addr_space_superset (asl, asr, &as_common))
    return 0;

  /* For pedantic record result of comptypes on arrays before losing
     qualifiers on the element type below.  */
  val_ped = 1;
  if (TREE_CODE (mvl) == ARRAY_TYPE
      && TREE_CODE (mvr) == ARRAY_TYPE)
    val_ped = comptypes (mvl, mvr);

  /* Qualifiers on element types of array types that are
     pointer targets are lost by taking their TYPE_MAIN_VARIANT.
     _Atomic is deliberately preserved on both sides.  */
  mvl = (TYPE_ATOMIC (strip_array_types (mvl))
	 ? c_build_qualified_type (TYPE_MAIN_VARIANT (mvl), TYPE_QUAL_ATOMIC)
	 : TYPE_MAIN_VARIANT (mvl));
  mvr = (TYPE_ATOMIC (strip_array_types (mvr))
	 ? c_build_qualified_type (TYPE_MAIN_VARIANT (mvr), TYPE_QUAL_ATOMIC)
	 : TYPE_MAIN_VARIANT (mvr));
  enum_and_int_p = false;
  val = comptypes_check_enum_int (mvl, mvr, &enum_and_int_p);

  /* Compatible overall, but the array comparison above (with
     qualifiers still present) disagreed: pedantically diagnose.  */
  if (val == 1 && val_ped != 1)
    pedwarn (location, OPT_Wpedantic, "pointers to arrays with different qualifiers "
	     "are incompatible in ISO C");

  if (val == 2)
    pedwarn (location, OPT_Wpedantic, "types are not quite compatible");

  /* An enum matched an int: fine in C, an error in C++.  */
  if (val == 1 && enum_and_int_p && warn_cxx_compat)
    warning_at (location, OPT_Wc___compat,
		"pointer target types incompatible in C++");

  return val;
}
/* Subroutines of `comptypes'. */
/* Determine whether two trees derive from the same translation unit.
If the CONTEXT chain ends in a null, that tree's context is still
being parsed, so if two trees have context chains ending in null,
they're in the same translation unit. */
bool
same_translation_unit_p (const_tree t1, const_tree t2)
{
while (t1 && TREE_CODE (t1) != TRANSLATION_UNIT_DECL)
switch (TREE_CODE_CLASS (TREE_CODE (t1)))
{
case tcc_declaration:
t1 = DECL_CONTEXT (t1); break;
case tcc_type:
t1 = TYPE_CONTEXT (t1); break;
case tcc_exceptional:
t1 = BLOCK_SUPERCONTEXT (t1); break; /* assume block */
default: gcc_unreachable ();
}
while (t2 && TREE_CODE (t2) != TRANSLATION_UNIT_DECL)
switch (TREE_CODE_CLASS (TREE_CODE (t2)))
{
case tcc_declaration:
t2 = DECL_CONTEXT (t2); break;
case tcc_type:
t2 = TYPE_CONTEXT (t2); break;
case tcc_exceptional:
t2 = BLOCK_SUPERCONTEXT (t2); break; /* assume block */
default: gcc_unreachable ();
}
return t1 == t2;
}
/* Allocate the seen two types, assuming that they are compatible.  */

static struct tagged_tu_seen_cache *
alloc_tagged_tu_seen_cache (const_tree t1, const_tree t2)
{
  struct tagged_tu_seen_cache *entry = XNEW (struct tagged_tu_seen_cache);

  entry->t1 = t1;
  entry->t2 = t2;

  /* The C standard says that two structures in different translation
     units are compatible with each other only if the types of their
     fields are compatible (among other things).  We assume that they
     are compatible until proven otherwise when building the cache.
     An example where this can occur is:
     struct a
     {
       struct a *next;
     };
     If we are comparing this against a similar struct in another TU,
     and did not assume they were compatible, we end up with an infinite
     loop.  */
  entry->val = 1;

  /* Push the new entry onto the global cache list.  */
  entry->next = tagged_tu_seen_base;
  tagged_tu_seen_base = entry;
  return entry;
}
/* Free the seen types until we get to TU_TIL.  */

static void
free_all_tagged_tu_seen_up_to (const struct tagged_tu_seen_cache *tu_til)
{
  const struct tagged_tu_seen_cache *entry = tagged_tu_seen_base;

  /* Pop and release every cache entry pushed after TU_TIL.  */
  while (entry != tu_til)
    {
      const struct tagged_tu_seen_cache *dead = entry;
      entry = entry->next;
      free (CONST_CAST (struct tagged_tu_seen_cache *, dead));
    }
  tagged_tu_seen_base = tu_til;
}
/* Return 1 if two 'struct', 'union', or 'enum' types T1 and T2 are
   compatible.  If the two types are not the same (which has been
   checked earlier), this can only happen when multiple translation
   units are being compiled.  See C99 6.2.7 paragraph 1 for the exact
   rules.  ENUM_AND_INT_P and DIFFERENT_TYPES_P are as in
   comptypes_internal.  The result is 0 (incompatible), 1 (compatible)
   or 2 (compatible but a warning may be needed); the same value is
   recorded in the tagged_tu_seen cache for reuse.  */

static int
tagged_types_tu_compatible_p (const_tree t1, const_tree t2,
			      bool *enum_and_int_p, bool *different_types_p)
{
  tree s1, s2;
  bool needs_warning = false;

  /* We have to verify that the tags of the types are the same.  This
     is harder than it looks because this may be a typedef, so we have
     to go look at the original type.  It may even be a typedef of a
     typedef...
     In the case of compiler-created builtin structs the TYPE_DECL
     may be a dummy, with no DECL_ORIGINAL_TYPE.  Don't fault.  */
  while (TYPE_NAME (t1)
	 && TREE_CODE (TYPE_NAME (t1)) == TYPE_DECL
	 && DECL_ORIGINAL_TYPE (TYPE_NAME (t1)))
    t1 = DECL_ORIGINAL_TYPE (TYPE_NAME (t1));

  while (TYPE_NAME (t2)
	 && TREE_CODE (TYPE_NAME (t2)) == TYPE_DECL
	 && DECL_ORIGINAL_TYPE (TYPE_NAME (t2)))
    t2 = DECL_ORIGINAL_TYPE (TYPE_NAME (t2));

  /* C90 didn't have the requirement that the two tags be the same.  */
  if (flag_isoc99 && TYPE_NAME (t1) != TYPE_NAME (t2))
    return 0;

  /* C90 didn't say what happened if one or both of the types were
     incomplete; we choose to follow C99 rules here, which is that they
     are compatible.  */
  if (TYPE_SIZE (t1) == NULL
      || TYPE_SIZE (t2) == NULL)
    return 1;

  /* Consult the cache: a hit either reuses an earlier answer or breaks
     the recursion for self-referential aggregates.  */
  {
    const struct tagged_tu_seen_cache * tts_i;
    for (tts_i = tagged_tu_seen_base; tts_i != NULL; tts_i = tts_i->next)
      if (tts_i->t1 == t1 && tts_i->t2 == t2)
	return tts_i->val;
  }

  switch (TREE_CODE (t1))
    {
    case ENUMERAL_TYPE:
      {
	struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);
	/* Speed up the case where the type values are in the same order.  */
	tree tv1 = TYPE_VALUES (t1);
	tree tv2 = TYPE_VALUES (t2);

	if (tv1 == tv2)
	  return 1;

	for (;tv1 && tv2; tv1 = TREE_CHAIN (tv1), tv2 = TREE_CHAIN (tv2))
	  {
	    if (TREE_PURPOSE (tv1) != TREE_PURPOSE (tv2))
	      break;
	    if (simple_cst_equal (TREE_VALUE (tv1), TREE_VALUE (tv2)) != 1)
	      {
		tu->val = 0;
		return 0;
	      }
	  }

	if (tv1 == NULL_TREE && tv2 == NULL_TREE)
	  return 1;
	if (tv1 == NULL_TREE || tv2 == NULL_TREE)
	  {
	    tu->val = 0;
	    return 0;
	  }

	/* Fall back to an order-independent search: same number of
	   enumerators, and each name in T1 must appear in T2 with an
	   equal value.  */
	if (list_length (TYPE_VALUES (t1)) != list_length (TYPE_VALUES (t2)))
	  {
	    tu->val = 0;
	    return 0;
	  }

	for (s1 = TYPE_VALUES (t1); s1; s1 = TREE_CHAIN (s1))
	  {
	    s2 = purpose_member (TREE_PURPOSE (s1), TYPE_VALUES (t2));
	    if (s2 == NULL
		|| simple_cst_equal (TREE_VALUE (s1), TREE_VALUE (s2)) != 1)
	      {
		tu->val = 0;
		return 0;
	      }
	  }
	return 1;
      }

    case UNION_TYPE:
      {
	struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);
	if (list_length (TYPE_FIELDS (t1)) != list_length (TYPE_FIELDS (t2)))
	  {
	    tu->val = 0;
	    return 0;
	  }

	/* Speed up the common case where the fields are in the same order.  */
	for (s1 = TYPE_FIELDS (t1), s2 = TYPE_FIELDS (t2); s1 && s2;
	     s1 = DECL_CHAIN (s1), s2 = DECL_CHAIN (s2))
	  {
	    int result;

	    if (DECL_NAME (s1) != DECL_NAME (s2))
	      break;
	    result = comptypes_internal (TREE_TYPE (s1), TREE_TYPE (s2),
					 enum_and_int_p, different_types_p);

	    if (result != 1 && !DECL_NAME (s1))
	      break;
	    if (result == 0)
	      {
		tu->val = 0;
		return 0;
	      }
	    if (result == 2)
	      needs_warning = true;

	    if (TREE_CODE (s1) == FIELD_DECL
		&& simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1),
				     DECL_FIELD_BIT_OFFSET (s2)) != 1)
	      {
		tu->val = 0;
		return 0;
	      }
	  }
	if (!s1 && !s2)
	  {
	    tu->val = needs_warning ? 2 : 1;
	    return tu->val;
	  }

	/* Fields are not in the same order: for each member of T1,
	   find the member of T2 with the same name and compare type
	   and bit offset.  */
	for (s1 = TYPE_FIELDS (t1); s1; s1 = DECL_CHAIN (s1))
	  {
	    bool ok = false;

	    for (s2 = TYPE_FIELDS (t2); s2; s2 = DECL_CHAIN (s2))
	      if (DECL_NAME (s1) == DECL_NAME (s2))
		{
		  int result;

		  result = comptypes_internal (TREE_TYPE (s1), TREE_TYPE (s2),
					       enum_and_int_p,
					       different_types_p);

		  if (result != 1 && !DECL_NAME (s1))
		    continue;
		  if (result == 0)
		    {
		      tu->val = 0;
		      return 0;
		    }
		  if (result == 2)
		    needs_warning = true;

		  if (TREE_CODE (s1) == FIELD_DECL
		      && simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1),
					   DECL_FIELD_BIT_OFFSET (s2)) != 1)
		    break;

		  ok = true;
		  break;
		}
	    if (!ok)
	      {
		tu->val = 0;
		return 0;
	      }
	  }
	/* Fixed: the cached/returned value must be one of the documented
	   results 0/1/2; this previously stored 10, which callers that
	   compare against 1 or 2 would misinterpret (cf. the parallel
	   RECORD_TYPE case below, which stores 1).  */
	tu->val = needs_warning ? 2 : 1;
	return tu->val;
      }

    case RECORD_TYPE:
      {
	struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);

	/* Structs must agree member-for-member, in declaration order.  */
	for (s1 = TYPE_FIELDS (t1), s2 = TYPE_FIELDS (t2);
	     s1 && s2;
	     s1 = DECL_CHAIN (s1), s2 = DECL_CHAIN (s2))
	  {
	    int result;
	    if (TREE_CODE (s1) != TREE_CODE (s2)
		|| DECL_NAME (s1) != DECL_NAME (s2))
	      break;
	    result = comptypes_internal (TREE_TYPE (s1), TREE_TYPE (s2),
					 enum_and_int_p, different_types_p);
	    if (result == 0)
	      break;
	    if (result == 2)
	      needs_warning = true;

	    if (TREE_CODE (s1) == FIELD_DECL
		&& simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1),
				     DECL_FIELD_BIT_OFFSET (s2)) != 1)
	      break;
	  }
	/* The loop stopped early iff some member failed to match.  */
	if (s1 && s2)
	  tu->val = 0;
	else
	  tu->val = needs_warning ? 2 : 1;
	return tu->val;
      }

    default:
      gcc_unreachable ();
    }
}
/* Return 1 if two function types F1 and F2 are compatible.
   If either type specifies no argument types,
   the other must specify a fixed number of self-promoting arg types.
   Otherwise, if one type specifies only the number of arguments,
   the other must specify that number of self-promoting arg types.
   Otherwise, the argument types must match.
   ENUM_AND_INT_P and DIFFERENT_TYPES_P are as in comptypes_internal.
   Returns 0 (incompatible), 1 (compatible) or 2 (compatible but
   deserving a warning).  */

static int
function_types_compatible_p (const_tree f1, const_tree f2,
			     bool *enum_and_int_p, bool *different_types_p)
{
  tree args1, args2;
  /* 1 if no need for warning yet, 2 if warning cause has been seen.  */
  int val = 1;
  int val1;
  tree ret1, ret2;

  ret1 = TREE_TYPE (f1);
  ret2 = TREE_TYPE (f2);

  /* 'volatile' qualifiers on a function's return type used to mean
     the function is noreturn.  */
  if (TYPE_VOLATILE (ret1) != TYPE_VOLATILE (ret2))
    pedwarn (input_location, 0, "function return types not compatible due to %<volatile%>");
  /* Strip 'volatile' from both return types so it does not affect the
     compatibility check below.  */
  if (TYPE_VOLATILE (ret1))
    ret1 = build_qualified_type (TYPE_MAIN_VARIANT (ret1),
				 TYPE_QUALS (ret1) & ~TYPE_QUAL_VOLATILE);
  if (TYPE_VOLATILE (ret2))
    ret2 = build_qualified_type (TYPE_MAIN_VARIANT (ret2),
				 TYPE_QUALS (ret2) & ~TYPE_QUAL_VOLATILE);
  val = comptypes_internal (ret1, ret2, enum_and_int_p, different_types_p);
  if (val == 0)
    return 0;

  args1 = TYPE_ARG_TYPES (f1);
  args2 = TYPE_ARG_TYPES (f2);

  /* Exactly one of the two is unprototyped: note the difference.  */
  if (different_types_p != NULL
      && (args1 == NULL_TREE) != (args2 == NULL_TREE))
    *different_types_p = true;

  /* An unspecified parmlist matches any specified parmlist
     whose argument types don't need default promotions.  */

  if (args1 == NULL_TREE)
    {
      if (!self_promoting_args_p (args2))
	return 0;
      /* If one of these types comes from a non-prototype fn definition,
	 compare that with the other type's arglist.
	 If they don't match, ask for a warning (but no error).  */
      if (TYPE_ACTUAL_ARG_TYPES (f1)
	  && type_lists_compatible_p (args2, TYPE_ACTUAL_ARG_TYPES (f1),
				      enum_and_int_p, different_types_p) != 1)
	val = 2;
      return val;
    }
  /* Mirror case: F2 is the unprototyped one.  */
  if (args2 == NULL_TREE)
    {
      if (!self_promoting_args_p (args1))
	return 0;
      if (TYPE_ACTUAL_ARG_TYPES (f2)
	  && type_lists_compatible_p (args1, TYPE_ACTUAL_ARG_TYPES (f2),
				      enum_and_int_p, different_types_p) != 1)
	val = 2;
      return val;
    }

  /* Both types have argument lists: compare them and propagate results.  */
  val1 = type_lists_compatible_p (args1, args2, enum_and_int_p,
				  different_types_p);
  return val1 != 1 ? val1 : val;
}
/* Check two lists of types for compatibility, returning 0 for
   incompatible, 1 for compatible, or 2 for compatible with
   warning.  ENUM_AND_INT_P and DIFFERENT_TYPES_P are as in
   comptypes_internal.  */

static int
type_lists_compatible_p (const_tree args1, const_tree args2,
			 bool *enum_and_int_p, bool *different_types_p)
{
  /* 1 if no need for warning yet, 2 if warning cause has been seen.  */
  int val = 1;
  int newval = 0;

  /* Walk both lists in lockstep, consuming one parameter from each
     per iteration.  */
  while (1)
    {
      tree a1, mv1, a2, mv2;
      if (args1 == NULL_TREE && args2 == NULL_TREE)
	return val;
      /* If one list is shorter than the other,
	 they fail to match.  */
      if (args1 == NULL_TREE || args2 == NULL_TREE)
	return 0;
      mv1 = a1 = TREE_VALUE (args1);
      mv2 = a2 = TREE_VALUE (args2);
      /* Compare main variants (keeping _Atomic); array types and the
	 error mark are left untouched.  */
      if (mv1 && mv1 != error_mark_node && TREE_CODE (mv1) != ARRAY_TYPE)
	mv1 = (TYPE_ATOMIC (mv1)
	       ? c_build_qualified_type (TYPE_MAIN_VARIANT (mv1),
					 TYPE_QUAL_ATOMIC)
	       : TYPE_MAIN_VARIANT (mv1));
      if (mv2 && mv2 != error_mark_node && TREE_CODE (mv2) != ARRAY_TYPE)
	mv2 = (TYPE_ATOMIC (mv2)
	       ? c_build_qualified_type (TYPE_MAIN_VARIANT (mv2),
					 TYPE_QUAL_ATOMIC)
	       : TYPE_MAIN_VARIANT (mv2));
      /* A null pointer instead of a type
	 means there is supposed to be an argument
	 but nothing is specified about what type it has.
	 So match anything that self-promotes.  */
      if (different_types_p != NULL
	  && (a1 == NULL_TREE) != (a2 == NULL_TREE))
	*different_types_p = true;
      if (a1 == NULL_TREE)
	{
	  if (c_type_promotes_to (a2) != a2)
	    return 0;
	}
      else if (a2 == NULL_TREE)
	{
	  if (c_type_promotes_to (a1) != a1)
	    return 0;
	}
      /* If one of the lists has an error marker, ignore this arg.  */
      else if (TREE_CODE (a1) == ERROR_MARK
	       || TREE_CODE (a2) == ERROR_MARK)
	;
      else if (!(newval = comptypes_internal (mv1, mv2, enum_and_int_p,
					      different_types_p)))
	{
	  if (different_types_p != NULL)
	    *different_types_p = true;
	  /* Allow  wait (union {union wait *u; int *i} *)
	     and  wait (union wait *)  to be compatible.  */
	  if (TREE_CODE (a1) == UNION_TYPE
	      && (TYPE_NAME (a1) == NULL_TREE
		  || TYPE_TRANSPARENT_AGGR (a1))
	      && TREE_CODE (TYPE_SIZE (a1)) == INTEGER_CST
	      && tree_int_cst_equal (TYPE_SIZE (a1),
				     TYPE_SIZE (a2)))
	    {
	      tree memb;
	      /* The union argument matches if any of its members
		 is compatible with A2.  */
	      for (memb = TYPE_FIELDS (a1);
		   memb; memb = DECL_CHAIN (memb))
		{
		  tree mv3 = TREE_TYPE (memb);
		  if (mv3 && mv3 != error_mark_node
		      && TREE_CODE (mv3) != ARRAY_TYPE)
		    mv3 = (TYPE_ATOMIC (mv3)
			   ? c_build_qualified_type (TYPE_MAIN_VARIANT (mv3),
						     TYPE_QUAL_ATOMIC)
			   : TYPE_MAIN_VARIANT (mv3));
		  if (comptypes_internal (mv3, mv2, enum_and_int_p,
					  different_types_p))
		    break;
		}
	      if (memb == NULL_TREE)
		return 0;
	    }
	  /* Mirror case: A2 is the anonymous/transparent union.  */
	  else if (TREE_CODE (a2) == UNION_TYPE
		   && (TYPE_NAME (a2) == NULL_TREE
		       || TYPE_TRANSPARENT_AGGR (a2))
		   && TREE_CODE (TYPE_SIZE (a2)) == INTEGER_CST
		   && tree_int_cst_equal (TYPE_SIZE (a2),
					  TYPE_SIZE (a1)))
	    {
	      tree memb;
	      for (memb = TYPE_FIELDS (a2);
		   memb; memb = DECL_CHAIN (memb))
		{
		  tree mv3 = TREE_TYPE (memb);
		  if (mv3 && mv3 != error_mark_node
		      && TREE_CODE (mv3) != ARRAY_TYPE)
		    mv3 = (TYPE_ATOMIC (mv3)
			   ? c_build_qualified_type (TYPE_MAIN_VARIANT (mv3),
						     TYPE_QUAL_ATOMIC)
			   : TYPE_MAIN_VARIANT (mv3));
		  if (comptypes_internal (mv3, mv1, enum_and_int_p,
					  different_types_p))
		    break;
		}
	      if (memb == NULL_TREE)
		return 0;
	    }
	  else
	    return 0;
	}
      /* comptypes said ok, but record if it said to warn.  */
      if (newval > val)
	val = newval;

      args1 = TREE_CHAIN (args1);
      args2 = TREE_CHAIN (args2);
    }
}
/* Compute the size to increment a pointer by.  When a function type or void
   type or incomplete type is passed, size_one_node is returned.
   This function does not emit any diagnostics; the caller is responsible
   for that.  */

static tree
c_size_in_bytes (const_tree type)
{
  enum tree_code code = TREE_CODE (type);

  /* Arithmetic on pointers to functions, void, erroneous or
     incomplete types steps one byte at a time.  */
  if (code == FUNCTION_TYPE || code == VOID_TYPE || code == ERROR_MARK)
    return size_one_node;
  if (!COMPLETE_TYPE_P (type))
    return size_one_node;

  /* Convert in case a char is more than one unit.  */
  return size_binop_loc (input_location, CEIL_DIV_EXPR, TYPE_SIZE_UNIT (type),
			 size_int (TYPE_PRECISION (char_type_node)
				   / BITS_PER_UNIT));
}
/* Return either DECL or its known constant value (if it has one).
   IN_INIT permits substituting a CONSTRUCTOR value.  */

tree
decl_constant_value_1 (tree decl, bool in_init)
{
  /* Note that DECL_INITIAL isn't valid for a PARM_DECL.  */
  if (TREE_CODE (decl) == PARM_DECL)
    return decl;
  if (TREE_THIS_VOLATILE (decl) || !TREE_READONLY (decl))
    return decl;

  tree init = DECL_INITIAL (decl);
  if (init == NULL_TREE || error_operand_p (init))
    return decl;

  /* This is invalid if initial value is not constant.
     If it has either a function call, a memory reference,
     or a variable, then re-evaluating it could give different results.  */
  if (!TREE_CONSTANT (init))
    return decl;

  /* Check for cases where this is sub-optimal, even though valid.  */
  if (!in_init && TREE_CODE (init) == CONSTRUCTOR)
    return decl;

  return init;
}
/* Return either DECL or its known constant value (if it has one).
   Like the above, but always return decl outside of functions.  */

tree
decl_constant_value (tree decl)
{
  /* Don't change a variable array bound or initial value to a constant
     in a place where a variable is invalid.  */
  if (!current_function_decl)
    return decl;
  return decl_constant_value_1 (decl, false);
}
/* Convert the array expression EXP to a pointer to its first element.  */

static tree
array_to_pointer_conversion (location_t loc, tree exp)
{
  tree unstripped = exp;
  tree arr_type = TREE_TYPE (exp);
  tree elt_type = TREE_TYPE (arr_type);
  tree ptr_type = build_pointer_type (elt_type);

  gcc_assert (TREE_CODE (arr_type) == ARRAY_TYPE);

  STRIP_TYPE_NOPS (exp);

  /* Carry warning suppression over to the stripped expression.  */
  if (TREE_NO_WARNING (unstripped))
    TREE_NO_WARNING (exp) = 1;

  /* (*P) decays to P, converted to the element-pointer type.  */
  if (INDIRECT_REF_P (exp))
    return convert (ptr_type, TREE_OPERAND (exp, 0));

  /* In C++ array compound literals are temporary objects unless they are
     const or appear in namespace scope, so they are destroyed too soon
     to use them for much of anything (c++/53220).  */
  if (warn_cxx_compat && TREE_CODE (exp) == COMPOUND_LITERAL_EXPR)
    {
      tree decl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
      if (!TREE_READONLY (decl) && !TREE_STATIC (decl))
	warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
		    "converting an array compound literal to a pointer "
		    "is ill-formed in C++");
    }

  return convert (ptr_type, build_unary_op (loc, ADDR_EXPR, exp, true));
}
/* Convert the function expression EXP to a pointer to the function.  */

static tree
function_to_pointer_conversion (location_t loc, tree exp)
{
  tree unstripped = exp;

  gcc_assert (TREE_CODE (TREE_TYPE (exp)) == FUNCTION_TYPE);

  STRIP_TYPE_NOPS (exp);

  /* Carry warning suppression over to the stripped expression.  */
  if (TREE_NO_WARNING (unstripped))
    TREE_NO_WARNING (exp) = 1;

  return build_unary_op (loc, ADDR_EXPR, exp, false);
}
/* Mark EXP as read, not just set, for set but not used -Wunused
   warning purposes.  */

void
mark_exp_read (tree exp)
{
  switch (TREE_CODE (exp))
    {
    case VAR_DECL:
    case PARM_DECL:
      DECL_READ_P (exp) = 1;
      break;
    case ARRAY_REF:
    case COMPONENT_REF:
    case MODIFY_EXPR:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    CASE_CONVERT:
    case ADDR_EXPR:
    case VIEW_CONVERT_EXPR:
      /* Reading any of these reads the underlying object: recurse on
	 the (first) operand.  */
      mark_exp_read (TREE_OPERAND (exp, 0));
      break;
    case COMPOUND_EXPR:
      /* Pattern match what build_atomic_assign produces with modifycode
	 NOP_EXPR.  */
      if (VAR_P (TREE_OPERAND (exp, 1))
	  && DECL_ARTIFICIAL (TREE_OPERAND (exp, 1))
	  && TREE_CODE (TREE_OPERAND (exp, 0)) == COMPOUND_EXPR)
	{
	  tree t1 = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
	  tree t2 = TREE_OPERAND (TREE_OPERAND (exp, 0), 1);
	  if (TREE_CODE (t1) == TARGET_EXPR
	      && TARGET_EXPR_SLOT (t1) == TREE_OPERAND (exp, 1)
	      && TREE_CODE (t2) == CALL_EXPR)
	    {
	      tree fndecl = get_callee_fndecl (t2);
	      tree arg = NULL_TREE;
	      /* Pick out the address argument of the atomic-store
		 builtin: the generic form takes it second, the
		 sized forms first.  */
	      if (fndecl
		  && TREE_CODE (fndecl) == FUNCTION_DECL
		  && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
		  && call_expr_nargs (t2) >= 2)
		switch (DECL_FUNCTION_CODE (fndecl))
		  {
		  case BUILT_IN_ATOMIC_STORE:
		    arg = CALL_EXPR_ARG (t2, 1);
		    break;
		  case BUILT_IN_ATOMIC_STORE_1:
		  case BUILT_IN_ATOMIC_STORE_2:
		  case BUILT_IN_ATOMIC_STORE_4:
		  case BUILT_IN_ATOMIC_STORE_8:
		  case BUILT_IN_ATOMIC_STORE_16:
		    arg = CALL_EXPR_ARG (t2, 0);
		    break;
		  default:
		    break;
		  }
	      /* If the argument is &DECL of an atomic object, that
		 object is being read.  */
	      if (arg)
		{
		  STRIP_NOPS (arg);
		  if (TREE_CODE (arg) == ADDR_EXPR
		      && DECL_P (TREE_OPERAND (arg, 0))
		      && TYPE_ATOMIC (TREE_TYPE (TREE_OPERAND (arg, 0))))
		    mark_exp_read (TREE_OPERAND (arg, 0));
		}
	    }
	}
      /* FALLTHRU */
    case C_MAYBE_CONST_EXPR:
      /* For these, the value lives in operand 1.  */
      mark_exp_read (TREE_OPERAND (exp, 1));
      break;
    default:
      break;
    }
}
/* Perform the default conversion of arrays and functions to pointers.
   Return the result of converting EXP.  For any other expression, just
   return EXP.

   LOC is the location of the expression.  */

struct c_expr
default_function_array_conversion (location_t loc, struct c_expr exp)
{
  tree orig_exp = exp.value;
  tree type = TREE_TYPE (exp.value);
  enum tree_code code = TREE_CODE (type);

  switch (code)
    {
    case ARRAY_TYPE:
      {
	bool not_lvalue = false;
	bool lvalue_array_p;

	/* Strip NON_LVALUE_EXPRs and no-op conversions that leave the
	   array type unchanged, remembering whether the expression was
	   ever marked as not an lvalue.  */
	while ((TREE_CODE (exp.value) == NON_LVALUE_EXPR
		|| CONVERT_EXPR_P (exp.value))
	       && TREE_TYPE (TREE_OPERAND (exp.value, 0)) == type)
	  {
	    if (TREE_CODE (exp.value) == NON_LVALUE_EXPR)
	      not_lvalue = true;
	    exp.value = TREE_OPERAND (exp.value, 0);
	  }

	/* Carry warning suppression over to the stripped expression.  */
	if (TREE_NO_WARNING (orig_exp))
	  TREE_NO_WARNING (exp.value) = 1;

	lvalue_array_p = !not_lvalue && lvalue_p (exp.value);
	if (!flag_isoc99 && !lvalue_array_p)
	  {
	    /* Before C99, non-lvalue arrays do not decay to pointers.
	       Normally, using such an array would be invalid; but it can
	       be used correctly inside sizeof or as a statement expression.
	       Thus, do not give an error here; an error will result later.  */
	    return exp;
	  }

	exp.value = array_to_pointer_conversion (loc, exp.value);
      }
      break;
    case FUNCTION_TYPE:
      exp.value = function_to_pointer_conversion (loc, exp.value);
      break;
    default:
      /* Other types are returned unchanged.  */
      break;
    }

  return exp;
}
/* Like default_function_array_conversion, but also mark the
   expression as read for -Wunused purposes.  */

struct c_expr
default_function_array_read_conversion (location_t loc, struct c_expr exp)
{
  struct c_expr converted;

  mark_exp_read (exp.value);
  converted = default_function_array_conversion (loc, exp);
  return converted;
}
/* Return whether EXPR should be treated as an atomic lvalue for the
   purposes of load and store handling.  */

static bool
really_atomic_lvalue (tree expr)
{
  if (error_operand_p (expr)
      || !TYPE_ATOMIC (TREE_TYPE (expr))
      || !lvalue_p (expr))
    return false;

  /* Ignore _Atomic on register variables, since their addresses can't
     be taken so (a) atomicity is irrelevant and (b) the normal atomic
     sequences wouldn't work.  Ignore _Atomic on structures containing
     bit-fields, since accessing elements of atomic structures or
     unions is undefined behavior (C11 6.5.2.3#5), but it's unclear if
     it's undefined at translation time or execution time, and the
     normal atomic sequences again wouldn't work.  */
  for (; handled_component_p (expr); expr = TREE_OPERAND (expr, 0))
    if (TREE_CODE (expr) == COMPONENT_REF
	&& DECL_C_BIT_FIELD (TREE_OPERAND (expr, 1)))
      return false;

  return !(DECL_P (expr) && C_DECL_REGISTER (expr));
}
/* Convert expression EXP (location LOC) from lvalue to rvalue,
   including converting functions and arrays to pointers if CONVERT_P.
   If READ_P, also mark the expression as having been read.  */

struct c_expr
convert_lvalue_to_rvalue (location_t loc, struct c_expr exp,
			  bool convert_p, bool read_p)
{
  if (read_p)
    mark_exp_read (exp.value);
  if (convert_p)
    exp = default_function_array_conversion (loc, exp);
  if (!VOID_TYPE_P (TREE_TYPE (exp.value)))
    exp.value = require_complete_type (loc, exp.value);
  /* A load from an _Atomic lvalue must go through __atomic_load
     rather than a plain memory read.  */
  if (really_atomic_lvalue (exp.value))
    {
      vec<tree, va_gc> *params;
      tree nonatomic_type, tmp, tmp_addr, fndecl, func_call;
      tree expr_type = TREE_TYPE (exp.value);
      tree expr_addr = build_unary_op (loc, ADDR_EXPR, exp.value, false);
      tree seq_cst = build_int_cst (integer_type_node, MEMMODEL_SEQ_CST);

      gcc_assert (TYPE_ATOMIC (expr_type));

      /* Expansion of a generic atomic load may require an additional
	 element, so allocate enough to prevent a resize.  */
      vec_alloc (params, 4);

      /* Remove the qualifiers for the rest of the expressions and
	 create the VAL temp variable to hold the RHS.  */
      nonatomic_type = build_qualified_type (expr_type, TYPE_UNQUALIFIED);
      tmp = create_tmp_var_raw (nonatomic_type);
      tmp_addr = build_unary_op (loc, ADDR_EXPR, tmp, false);
      TREE_ADDRESSABLE (tmp) = 1;
      /* The temporary is compiler-generated; suppress warnings on it.  */
      TREE_NO_WARNING (tmp) = 1;

      /* Issue __atomic_load (&expr, &tmp, SEQ_CST);  */
      fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_LOAD);
      params->quick_push (expr_addr);
      params->quick_push (tmp_addr);
      params->quick_push (seq_cst);
      func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL);

      /* EXPR is always read.  */
      mark_exp_read (exp.value);

      /* Return tmp which contains the value loaded.  */
      exp.value = build4 (TARGET_EXPR, nonatomic_type, tmp, func_call,
			  NULL_TREE, NULL_TREE);
    }
  return exp;
}
/* EXP is an expression of integer type.  Apply the integer promotions
   to it and return the promoted value.  */

tree
perform_integral_promotions (tree exp)
{
  tree type = TREE_TYPE (exp);
  enum tree_code code = TREE_CODE (type);

  gcc_assert (INTEGRAL_TYPE_P (type));

  /* Normally convert enums to int,
     but convert wide enums to something wider.  */
  if (code == ENUMERAL_TYPE)
    {
      bool at_least_int_precision
	= TYPE_PRECISION (type) >= TYPE_PRECISION (integer_type_node);
      tree promoted
	= c_common_type_for_size (MAX (TYPE_PRECISION (type),
				       TYPE_PRECISION (integer_type_node)),
				  (at_least_int_precision
				   && TYPE_UNSIGNED (type)));
      return convert (promoted, exp);
    }

  /* ??? This should no longer be needed now bit-fields have their
     proper types.  */
  if (TREE_CODE (exp) == COMPONENT_REF
      && DECL_C_BIT_FIELD (TREE_OPERAND (exp, 1))
      /* If it's thinner than an int, promote it like a
	 c_promoting_integer_type_p, otherwise leave it alone.  */
      && compare_tree_int (DECL_SIZE (TREE_OPERAND (exp, 1)),
			   TYPE_PRECISION (integer_type_node)) < 0)
    return convert (integer_type_node, exp);

  if (!c_promoting_integer_type_p (type))
    return exp;

  /* Preserve unsignedness if not really getting any wider.  */
  if (TYPE_UNSIGNED (type)
      && TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node))
    return convert (unsigned_type_node, exp);
  return convert (integer_type_node, exp);
}
/* Perform default promotions for C data used in expressions.
   Enumeral types or short or char are converted to int.
   In addition, manifest constants symbols are replaced by their values.
   Returns error_mark_node for void values or incomplete types.  */

tree
default_conversion (tree exp)
{
  tree orig_exp;
  tree type = TREE_TYPE (exp);
  enum tree_code code = TREE_CODE (type);
  tree promoted_type;

  mark_exp_read (exp);

  /* Functions and arrays have been converted during parsing.  */
  gcc_assert (code != FUNCTION_TYPE);
  if (code == ARRAY_TYPE)
    return exp;

  /* Constants can be used directly unless they're not loadable.  */
  if (TREE_CODE (exp) == CONST_DECL)
    exp = DECL_INITIAL (exp);

  /* Strip no-op conversions.  */
  orig_exp = exp;
  STRIP_TYPE_NOPS (exp);

  /* Carry warning suppression over to the stripped expression.  */
  if (TREE_NO_WARNING (orig_exp))
    TREE_NO_WARNING (exp) = 1;

  if (code == VOID_TYPE)
    {
      error_at (EXPR_LOC_OR_LOC (exp, input_location),
		"void value not ignored as it ought to be");
      return error_mark_node;
    }

  exp = require_complete_type (EXPR_LOC_OR_LOC (exp, input_location), exp);
  if (exp == error_mark_node)
    return error_mark_node;

  /* Let the target hook supply a promoted type first; otherwise fall
     back to the standard integer promotions.  */
  promoted_type = targetm.promoted_type (type);
  if (promoted_type)
    return convert (promoted_type, exp);

  if (INTEGRAL_TYPE_P (type))
    return perform_integral_promotions (exp);

  return exp;
}
/* Look up COMPONENT in a structure or union TYPE.

   If the component name is not found, returns NULL_TREE.  Otherwise,
   the return value is a TREE_LIST, with each TREE_VALUE a FIELD_DECL
   stepping down the chain to the component, which is in the last
   TREE_VALUE of the list.  Normally the list is of length one, but if
   the component is embedded within (nested) anonymous structures or
   unions, the list steps down the chain to the component.  */

static tree
lookup_field (tree type, tree component)
{
  tree field;

  /* If TYPE_LANG_SPECIFIC is set, then it is a sorted array of pointers
     to the field elements.  Use a binary search on this array to quickly
     find the element.  Otherwise, do a linear search.  TYPE_LANG_SPECIFIC
     will always be set for structures which have many elements.

     Duplicate field checking replaces duplicates with NULL_TREE so
     TYPE_LANG_SPECIFIC arrays are potentially no longer sorted.  In that
     case just iterate using DECL_CHAIN.  */
  if (TYPE_LANG_SPECIFIC (type) && TYPE_LANG_SPECIFIC (type)->s
      && !seen_error ())
    {
      int bot, top, half;
      tree *field_array = &TYPE_LANG_SPECIFIC (type)->s->elts[0];

      field = TYPE_FIELDS (type);
      bot = 0;
      top = TYPE_LANG_SPECIFIC (type)->s->len;
      while (top - bot > 1)
	{
	  half = (top - bot + 1) >> 1;
	  field = field_array[bot+half];

	  if (DECL_NAME (field) == NULL_TREE)
	    {
	      /* Step through all anon unions in linear fashion.  */
	      while (DECL_NAME (field_array[bot]) == NULL_TREE)
		{
		  field = field_array[bot++];
		  if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (field)))
		    {
		      /* Recurse into the anonymous aggregate; on a hit,
			 prepend the anonymous field to the chain.  */
		      tree anon = lookup_field (TREE_TYPE (field), component);

		      if (anon)
			return tree_cons (NULL_TREE, field, anon);

		      /* The Plan 9 compiler permits referring
			 directly to an anonymous struct/union field
			 using a typedef name.  */
		      if (flag_plan9_extensions
			  && TYPE_NAME (TREE_TYPE (field)) != NULL_TREE
			  && (TREE_CODE (TYPE_NAME (TREE_TYPE (field)))
			      == TYPE_DECL)
			  && (DECL_NAME (TYPE_NAME (TREE_TYPE (field)))
			      == component))
			break;
		    }
		}

	      /* Entire record is only anon unions.  */
	      if (bot > top)
		return NULL_TREE;

	      /* Restart the binary search, with new lower bound.  */
	      continue;
	    }

	  if (DECL_NAME (field) == component)
	    break;
	  /* The array is sorted on the DECL_NAME pointer values, so a
	     raw pointer comparison steers the binary search.  */
	  if (DECL_NAME (field) < component)
	    bot += half;
	  else
	    top = bot + half;
	}

      /* The loop may end with the match at index BOT rather than in
	 FIELD; check both before giving up.  */
      if (DECL_NAME (field_array[bot]) == component)
	field = field_array[bot];
      else if (DECL_NAME (field) != component)
	return NULL_TREE;
    }
  else
    {
      /* Linear search over the member chain.  */
      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	{
	  if (DECL_NAME (field) == NULL_TREE
	      && RECORD_OR_UNION_TYPE_P (TREE_TYPE (field)))
	    {
	      tree anon = lookup_field (TREE_TYPE (field), component);

	      if (anon)
		return tree_cons (NULL_TREE, field, anon);

	      /* The Plan 9 compiler permits referring directly to an
		 anonymous struct/union field using a typedef
		 name.  */
	      if (flag_plan9_extensions
		  && TYPE_NAME (TREE_TYPE (field)) != NULL_TREE
		  && TREE_CODE (TYPE_NAME (TREE_TYPE (field))) == TYPE_DECL
		  && (DECL_NAME (TYPE_NAME (TREE_TYPE (field)))
		      == component))
		break;
	    }

	  if (DECL_NAME (field) == component)
	    break;
	}

      if (field == NULL_TREE)
	return NULL_TREE;
    }

  return tree_cons (NULL_TREE, field, NULL_TREE);
}
/* Recursively append candidate IDENTIFIER_NODEs to CANDIDATES.  */

static void
lookup_field_fuzzy_find_candidates (tree type, tree component,
				    vec<tree> *candidates)
{
  for (tree f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
    {
      if (DECL_NAME (f))
	candidates->safe_push (DECL_NAME (f));
      else if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (f)))
	/* Unnamed struct/union member: its fields are candidates too.  */
	lookup_field_fuzzy_find_candidates (TREE_TYPE (f), component,
					    candidates);
    }
}
/* Like "lookup_field", but find the closest matching IDENTIFIER_NODE,
   rather than returning a TREE_LIST for an exact match.  */

static tree
lookup_field_fuzzy (tree type, tree component)
{
  gcc_assert (TREE_CODE (component) == IDENTIFIER_NODE);

  /* Collect every member name (including those inside anonymous
     sub-aggregates), then pick the closest one.  */
  auto_vec <tree> names;
  lookup_field_fuzzy_find_candidates (type, component, &names);
  return find_closest_identifier (component, &names);
}
/* Support function for build_component_ref's error-handling.

   Given DATUM_TYPE, and "DATUM.COMPONENT", where DATUM is *not* a
   struct or union, should we suggest "DATUM->COMPONENT" as a hint?  */

static bool
should_suggest_deref_p (tree datum_type)
{
  /* We don't do it for Objective-C, since Objective-C 2.0 dot-syntax
     allows "." for ptrs; we could be handling a failed attempt
     to access a property.  */
  if (c_dialect_objc ())
    return false;

  /* Only suggest it for pointers...  */
  if (TREE_CODE (datum_type) != POINTER_TYPE)
    return false;

  /* ...to structs/unions.  */
  enum tree_code code = TREE_CODE (TREE_TYPE (datum_type));
  return code == RECORD_TYPE || code == UNION_TYPE;
}
/* Make an expression to refer to the COMPONENT field of structure or
   union value DATUM.  COMPONENT is an IDENTIFIER_NODE.  LOC is the
   location of the COMPONENT_REF.  COMPONENT_LOC is the location
   of COMPONENT.  Returns the (possibly chained) COMPONENT_REF, or
   error_mark_node after emitting a diagnostic.  */

tree
build_component_ref (location_t loc, tree datum, tree component,
		     location_t component_loc)
{
  tree type = TREE_TYPE (datum);
  enum tree_code code = TREE_CODE (type);
  tree field = NULL;
  tree ref;
  /* Capture lvalue-ness up front: DATUM is rewritten in the loop
     below, and qualifier propagation depends on the original.  */
  bool datum_lvalue = lvalue_p (datum);

  if (!objc_is_public (datum, component))
    return error_mark_node;

  /* Detect Objective-C property syntax object.property.  */
  if (c_dialect_objc ()
      && (ref = objc_maybe_build_component_ref (datum, component)))
    return ref;

  /* See if there is a field or component with name COMPONENT.  */

  if (code == RECORD_TYPE || code == UNION_TYPE)
    {
      if (!COMPLETE_TYPE_P (type))
	{
	  c_incomplete_type_error (loc, NULL_TREE, type);
	  return error_mark_node;
	}

      field = lookup_field (type, component);

      if (!field)
	{
	  /* No such member: suggest the closest-spelled field name,
	     with a fix-it where we have a usable location.  */
	  tree guessed_id = lookup_field_fuzzy (type, component);
	  if (guessed_id)
	    {
	      /* Attempt to provide a fixit replacement hint, if
		 we have a valid range for the component.  */
	      location_t reported_loc
		= (component_loc != UNKNOWN_LOCATION) ? component_loc : loc;
	      gcc_rich_location rich_loc (reported_loc);
	      if (component_loc != UNKNOWN_LOCATION)
		rich_loc.add_fixit_misspelled_id (component_loc, guessed_id);
	      error_at (&rich_loc,
			"%qT has no member named %qE; did you mean %qE?",
			type, component, guessed_id);
	    }
	  else
	    error_at (loc, "%qT has no member named %qE", type, component);
	  return error_mark_node;
	}

      /* Accessing elements of atomic structures or unions is undefined
	 behavior (C11 6.5.2.3#5).  */
      if (TYPE_ATOMIC (type) && c_inhibit_evaluation_warnings == 0)
	{
	  if (code == RECORD_TYPE)
	    warning_at (loc, 0, "accessing a member %qE of an atomic "
			"structure %qE", component, datum);
	  else
	    warning_at (loc, 0, "accessing a member %qE of an atomic "
			"union %qE", component, datum);
	}

      /* Chain the COMPONENT_REFs if necessary down to the FIELD.
	 This might be better solved in future the way the C++ front
	 end does it - by giving the anonymous entities each a
	 separate name and type, and then have build_component_ref
	 recursively call itself.  We can't do that here.  */
      do
	{
	  /* FIELD is a TREE_LIST from lookup_field: one link per level
	     of anonymous struct/union nesting on the path to the
	     requested member.  */
	  tree subdatum = TREE_VALUE (field);
	  int quals;
	  tree subtype;
	  bool use_datum_quals;

	  if (TREE_TYPE (subdatum) == error_mark_node)
	    return error_mark_node;

	  /* If this is an rvalue, it does not have qualifiers in C
	     standard terms and we must avoid propagating such
	     qualifiers down to a non-lvalue array that is then
	     converted to a pointer.  */
	  use_datum_quals = (datum_lvalue
			     || TREE_CODE (TREE_TYPE (subdatum)) != ARRAY_TYPE);

	  quals = TYPE_QUALS (strip_array_types (TREE_TYPE (subdatum)));
	  if (use_datum_quals)
	    quals |= TYPE_QUALS (TREE_TYPE (datum));
	  subtype = c_build_qualified_type (TREE_TYPE (subdatum), quals);

	  ref = build3 (COMPONENT_REF, subtype, datum, subdatum,
			NULL_TREE);
	  SET_EXPR_LOCATION (ref, loc);
	  if (TREE_READONLY (subdatum)
	      || (use_datum_quals && TREE_READONLY (datum)))
	    TREE_READONLY (ref) = 1;
	  if (TREE_THIS_VOLATILE (subdatum)
	      || (use_datum_quals && TREE_THIS_VOLATILE (datum)))
	    TREE_THIS_VOLATILE (ref) = 1;

	  if (TREE_DEPRECATED (subdatum))
	    warn_deprecated_use (subdatum, NULL_TREE);

	  /* The ref just built becomes the base for the next level.  */
	  datum = ref;

	  field = TREE_CHAIN (field);
	}
      while (field);

      return ref;
    }
  else if (should_suggest_deref_p (type))
    {
      /* Special-case the error message for "ptr.field" for the case
	 where the user has confused "." vs "->".  */
      rich_location richloc (line_table, loc);
      /* "loc" should be the "." token.  */
      richloc.add_fixit_replace ("->");
      error_at (&richloc,
		"%qE is a pointer; did you mean to use %<->%>?",
		datum);
      return error_mark_node;
    }
  else if (code != ERROR_MARK)
    error_at (loc,
	      "request for member %qE in something not a structure or union",
	      component);

  return error_mark_node;
}
/* Given an expression PTR for a pointer, return an expression
   for the value pointed to.
   ERRORSTRING is the name of the operator to appear in error messages.

   LOC is the location to use for the generated tree.  Returns
   error_mark_node (after a diagnostic) if PTR is not of pointer
   type.  */

tree
build_indirect_ref (location_t loc, tree ptr, ref_operator errstring)
{
  tree pointer = default_conversion (ptr);
  tree type = TREE_TYPE (pointer);
  tree ref;

  if (TREE_CODE (type) == POINTER_TYPE)
    {
      if (CONVERT_EXPR_P (pointer)
	  || TREE_CODE (pointer) == VIEW_CONVERT_EXPR)
	{
	  /* If a warning is issued, mark it to avoid duplicates from
	     the backend.  This only needs to be done at
	     warn_strict_aliasing > 2.  */
	  if (warn_strict_aliasing > 2)
	    if (strict_aliasing_warning (EXPR_LOCATION (pointer),
					 type, TREE_OPERAND (pointer, 0)))
	      TREE_NO_WARNING (pointer) = 1;
	}

      if (TREE_CODE (pointer) == ADDR_EXPR
	  && (TREE_TYPE (TREE_OPERAND (pointer, 0))
	      == TREE_TYPE (type)))
	{
	  /* *&x collapses to x when the types agree exactly.  */
	  ref = TREE_OPERAND (pointer, 0);
	  protected_set_expr_location (ref, loc);
	  return ref;
	}
      else
	{
	  tree t = TREE_TYPE (type);

	  ref = build1 (INDIRECT_REF, t, pointer);

	  if (VOID_TYPE_P (t) && c_inhibit_evaluation_warnings == 0)
	    warning_at (loc, 0, "dereferencing %<void *%> pointer");

	  /* We *must* set TREE_READONLY when dereferencing a pointer to const,
	     so that we get the proper error message if the result is used
	     to assign to.  Also, &* is supposed to be a no-op.
	     And ANSI C seems to specify that the type of the result
	     should be the const type.  */
	  /* A de-reference of a pointer to const is not a const.  It is valid
	     to change it via some other pointer.  */
	  TREE_READONLY (ref) = TYPE_READONLY (t);
	  TREE_SIDE_EFFECTS (ref)
	    = TYPE_VOLATILE (t) || TREE_SIDE_EFFECTS (pointer);
	  TREE_THIS_VOLATILE (ref) = TYPE_VOLATILE (t);
	  protected_set_expr_location (ref, loc);
	  return ref;
	}
    }
  else if (TREE_CODE (pointer) != ERROR_MARK)
    invalid_indirection_error (loc, type, errstring);

  return error_mark_node;
}
/* This handles expressions of the form "a[i]", which denotes
   an array reference.

   This is logically equivalent in C to *(a+i), but we may do it differently.
   If A is a variable or a member, we generate a primitive ARRAY_REF.
   This avoids forcing the array out of registers, and can work on
   arrays that are not lvalues (for example, members of structures returned
   by functions).

   For vector types, allow vector[i] but not i[vector], and create
   *(((type*)&vectortype) + i) for the expression.

   LOC is the location to use for the returned expression.  */

tree
build_array_ref (location_t loc, tree array, tree index)
{
  tree ret;
  bool swapped = false;
  if (TREE_TYPE (array) == error_mark_node
      || TREE_TYPE (index) == error_mark_node)
    return error_mark_node;

  if (TREE_CODE (TREE_TYPE (array)) != ARRAY_TYPE
      && TREE_CODE (TREE_TYPE (array)) != POINTER_TYPE
      /* Allow vector[index] but not index[vector].  */
      && !gnu_vector_type_p (TREE_TYPE (array)))
    {
      if (TREE_CODE (TREE_TYPE (index)) != ARRAY_TYPE
	  && TREE_CODE (TREE_TYPE (index)) != POINTER_TYPE)
	{
	  error_at (loc,
		    "subscripted value is neither array nor pointer nor vector");

	  return error_mark_node;
	}
      /* C permits i[a] as well as a[i]; normalize to the a[i] form.  */
      std::swap (array, index);
      swapped = true;
    }

  if (!INTEGRAL_TYPE_P (TREE_TYPE (index)))
    {
      error_at (loc, "array subscript is not an integer");
      return error_mark_node;
    }

  if (TREE_CODE (TREE_TYPE (TREE_TYPE (array))) == FUNCTION_TYPE)
    {
      error_at (loc, "subscripted value is pointer to function");
      return error_mark_node;
    }

  /* ??? Existing practice has been to warn only when the char
     index is syntactically the index, not for char[array].  */
  if (!swapped)
    warn_array_subscript_with_type_char (loc, index);

  /* Apply default promotions *after* noticing character types.  */
  index = default_conversion (index);
  if (index == error_mark_node)
    return error_mark_node;

  gcc_assert (TREE_CODE (TREE_TYPE (index)) == INTEGER_TYPE);

  bool was_vector = VECTOR_TYPE_P (TREE_TYPE (array));
  /* May rewrite ARRAY in place (vector -> array view); a true result
     means the final expression must not be treated as an lvalue.  */
  bool non_lvalue = convert_vector_to_array_for_subscript (loc, &array, index);

  if (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE)
    {
      tree rval, type;

      /* An array that is indexed by a non-constant
	 cannot be stored in a register; we must be able to do
	 address arithmetic on its address.
	 Likewise an array of elements of variable size.  */
      if (TREE_CODE (index) != INTEGER_CST
	  || (COMPLETE_TYPE_P (TREE_TYPE (TREE_TYPE (array)))
	      && TREE_CODE (TYPE_SIZE (TREE_TYPE (TREE_TYPE (array)))) != INTEGER_CST))
	{
	  if (!c_mark_addressable (array, true))
	    return error_mark_node;
	}
      /* An array that is indexed by a constant value which is not within
	 the array bounds cannot be stored in a register either; because we
	 would get a crash in store_bit_field/extract_bit_field when trying
	 to access a non-existent part of the register.  */
      if (TREE_CODE (index) == INTEGER_CST
	  && TYPE_DOMAIN (TREE_TYPE (array))
	  && !int_fits_type_p (index, TYPE_DOMAIN (TREE_TYPE (array))))
	{
	  if (!c_mark_addressable (array))
	    return error_mark_node;
	}

      if ((pedantic || warn_c90_c99_compat)
	  && ! was_vector)
	{
	  /* Strip COMPONENT_REFs to reach the underlying object, to
	     check for register variables and non-lvalues.  */
	  tree foo = array;

	  while (TREE_CODE (foo) == COMPONENT_REF)
	    foo = TREE_OPERAND (foo, 0);
	  if (VAR_P (foo) && C_DECL_REGISTER (foo))
	    pedwarn (loc, OPT_Wpedantic,
		     "ISO C forbids subscripting %<register%> array");
	  else if (!lvalue_p (foo))
	    pedwarn_c90 (loc, OPT_Wpedantic,
			 "ISO C90 forbids subscripting non-lvalue "
			 "array");
	}

      type = TREE_TYPE (TREE_TYPE (array));
      rval = build4 (ARRAY_REF, type, array, index, NULL_TREE, NULL_TREE);
      /* Array ref is const/volatile if the array elements are
	 or if the array is.  */
      TREE_READONLY (rval)
	|= (TYPE_READONLY (TREE_TYPE (TREE_TYPE (array)))
	    | TREE_READONLY (array));
      TREE_SIDE_EFFECTS (rval)
	|= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array)))
	    | TREE_SIDE_EFFECTS (array));
      TREE_THIS_VOLATILE (rval)
	|= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array)))
	    /* This was added by rms on 16 Nov 91.
	       It fixes  vol struct foo *a;  a->elts[1]
	       in an inline function.
	       Hope it doesn't break something else.  */
	    | TREE_THIS_VOLATILE (array));
      ret = require_complete_type (loc, rval);
      protected_set_expr_location (ret, loc);
      if (non_lvalue)
	ret = non_lvalue_loc (loc, ret);
      return ret;
    }
  else
    {
      /* Pointer operand: lower a[i] to *(a + i).  */
      tree ar = default_conversion (array);

      if (ar == error_mark_node)
	return ar;

      gcc_assert (TREE_CODE (TREE_TYPE (ar)) == POINTER_TYPE);
      gcc_assert (TREE_CODE (TREE_TYPE (TREE_TYPE (ar))) != FUNCTION_TYPE);

      ret = build_indirect_ref (loc, build_binary_op (loc, PLUS_EXPR, ar,
						      index, false),
				RO_ARRAY_INDEXING);
      if (non_lvalue)
	ret = non_lvalue_loc (loc, ret);
      return ret;
    }
}
/* Build an external reference to identifier ID.  FUN indicates
   whether this will be used for a function call.  LOC is the source
   location of the identifier.  This sets *TYPE to the type of the
   identifier, which is not the same as the type of the returned value
   for CONST_DECLs defined as enum constants.  If the type of the
   identifier is not available, *TYPE is set to NULL.  */
tree
build_external_ref (location_t loc, tree id, bool fun, tree *type)
{
  tree ref;
  tree decl = lookup_name (id);

  /* In Objective-C, an instance variable (ivar) may be preferred to
     whatever lookup_name() found.  */
  decl = objc_lookup_ivar (decl, id);

  *type = NULL;
  if (decl && decl != error_mark_node)
    {
      ref = decl;
      *type = TREE_TYPE (ref);
    }
  else if (fun)
    /* Implicit function declaration.  */
    ref = implicitly_declare (loc, id);
  else if (decl == error_mark_node)
    /* Don't complain about something that's already been
       complained about.  */
    return error_mark_node;
  else
    {
      undeclared_variable (loc, id);
      return error_mark_node;
    }

  if (TREE_TYPE (ref) == error_mark_node)
    return error_mark_node;

  if (TREE_DEPRECATED (ref))
    warn_deprecated_use (ref, NULL_TREE);

  /* Recursive call does not count as usage.  */
  if (ref != current_function_decl)
    {
      TREE_USED (ref) = 1;
    }

  if (TREE_CODE (ref) == FUNCTION_DECL && !in_alignof)
    {
      if (!in_sizeof && !in_typeof)
	C_DECL_USED (ref) = 1;
      else if (DECL_INITIAL (ref) == NULL_TREE
	       && DECL_EXTERNAL (ref)
	       && !TREE_PUBLIC (ref))
	/* An undefined static function referenced only inside
	   sizeof/typeof may or may not turn out to be used; defer
	   that decision to pop_maybe_used.  */
	record_maybe_used_decl (ref);
    }

  if (TREE_CODE (ref) == CONST_DECL)
    {
      used_types_insert (TREE_TYPE (ref));

      if (warn_cxx_compat
	  && TREE_CODE (TREE_TYPE (ref)) == ENUMERAL_TYPE
	  && C_TYPE_DEFINED_IN_STRUCT (TREE_TYPE (ref)))
	{
	  warning_at (loc, OPT_Wc___compat,
		      ("enum constant defined in struct or union "
		       "is not visible in C++"));
	  inform (DECL_SOURCE_LOCATION (ref), "enum constant defined here");
	}

      /* An enum constant evaluates to its value, not the CONST_DECL.  */
      ref = DECL_INITIAL (ref);
      TREE_CONSTANT (ref) = 1;
    }
  else if (current_function_decl != NULL_TREE
	   && !DECL_FILE_SCOPE_P (current_function_decl)
	   && (VAR_OR_FUNCTION_DECL_P (ref)
	       || TREE_CODE (ref) == PARM_DECL))
    {
      /* Inside a nested function: flag references that cross a
	 function boundary.  */
      tree context = decl_function_context (ref);

      if (context != NULL_TREE && context != current_function_decl)
	DECL_NONLOCAL (ref) = 1;
    }
  /* C99 6.7.4p3: An inline definition of a function with external
     linkage ... shall not contain a reference to an identifier with
     internal linkage.  */
  else if (current_function_decl != NULL_TREE
	   && DECL_DECLARED_INLINE_P (current_function_decl)
	   && DECL_EXTERNAL (current_function_decl)
	   && VAR_OR_FUNCTION_DECL_P (ref)
	   && (!VAR_P (ref) || TREE_STATIC (ref))
	   && ! TREE_PUBLIC (ref)
	   && DECL_CONTEXT (ref) != current_function_decl)
    record_inline_static (loc, current_function_decl, ref,
			  csi_internal);

  return ref;
}
/* Record details of decls possibly used inside sizeof or typeof.  */
struct maybe_used_decl
{
  /* The decl.  */
  tree decl;
  /* The level seen at (in_sizeof + in_typeof).  */
  int level;
  /* The next one at this level or above, or NULL.  */
  struct maybe_used_decl *next;
};

/* Head of the stack of decls seen inside sizeof/typeof; maintained by
   record_maybe_used_decl and popped by pop_maybe_used.  */
static struct maybe_used_decl *maybe_used_decls;
/* Record that DECL, an undefined static function reference seen
   inside sizeof or typeof, might be used if the operand of sizeof is
   a VLA type or the operand of typeof is a variably modified
   type.  */

static void
record_maybe_used_decl (tree decl)
{
  struct maybe_used_decl *entry
    = XOBNEW (&parser_obstack, struct maybe_used_decl);
  entry->decl = decl;
  /* Remember the sizeof/typeof nesting depth at which DECL was seen,
     so pop_maybe_used knows when the entry goes out of scope.  */
  entry->level = in_sizeof + in_typeof;
  /* Push onto the head of the stack.  */
  entry->next = maybe_used_decls;
  maybe_used_decls = entry;
}
/* Pop the stack of decls possibly used inside sizeof or typeof.  If
   USED is false, just discard them.  If it is true, mark them used
   (if no longer inside sizeof or typeof) or move them to the next
   level up (if still inside sizeof or typeof).  */

void
pop_maybe_used (bool used)
{
  int level = in_sizeof + in_typeof;
  struct maybe_used_decl *entry;

  /* Process every entry recorded deeper than the current nesting
     level.  */
  for (entry = maybe_used_decls;
       entry && entry->level > level;
       entry = entry->next)
    if (used)
      {
	if (level == 0)
	  /* Fully outside sizeof/typeof: the decl really is used.  */
	  C_DECL_USED (entry->decl) = 1;
	else
	  /* Still nested: defer the decision to an enclosing pop.  */
	  entry->level = level;
      }

  /* Drop the processed entries, unless they were kept by being
     re-leveled above.  */
  if (!used || level == 0)
    maybe_used_decls = entry;
}
/* Return the result of sizeof applied to EXPR.  */

struct c_expr
c_expr_sizeof_expr (location_t loc, struct c_expr expr)
{
  struct c_expr ret;
  if (expr.value == error_mark_node)
    {
      ret.value = error_mark_node;
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      /* Erroneous operand: decls seen inside it cannot become used.  */
      pop_maybe_used (false);
    }
  else
    {
      bool expr_const_operands = true;

      if (TREE_CODE (expr.value) == PARM_DECL
	  && C_ARRAY_PARAMETER (expr.value))
	{
	  auto_diagnostic_group d;
	  if (warning_at (loc, OPT_Wsizeof_array_argument,
			  "%<sizeof%> on array function parameter %qE will "
			  "return size of %qT", expr.value,
			  TREE_TYPE (expr.value)))
	    inform (DECL_SOURCE_LOCATION (expr.value), "declared here");
	}
      tree folded_expr = c_fully_fold (expr.value, require_constant_value,
				       &expr_const_operands);
      ret.value = c_sizeof (loc, TREE_TYPE (folded_expr));
      /* Stash the operand for later diagnostics (e.g. sizeof/alloc
	 mismatch warnings).  */
      c_last_sizeof_arg = expr.value;
      c_last_sizeof_loc = loc;
      ret.original_code = SIZEOF_EXPR;
      ret.original_type = NULL;
      if (c_vla_type_p (TREE_TYPE (folded_expr)))
	{
	  /* sizeof is evaluated when given a vla (C99 6.5.3.4p2).  */
	  ret.value = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (ret.value),
			      folded_expr, ret.value);
	  C_MAYBE_CONST_EXPR_NON_CONST (ret.value) = !expr_const_operands;
	  SET_EXPR_LOCATION (ret.value, loc);
	}
      /* Deferred decls become used only if the operand's type is
	 variably sized (i.e. sizeof actually evaluates it).  */
      pop_maybe_used (C_TYPE_VARIABLE_SIZE (TREE_TYPE (folded_expr)));
    }
  return ret;
}
/* Return the result of sizeof applied to T, a structure for the type
   name passed to sizeof (rather than the type itself).  LOC is the
   location of the original expression.  */

struct c_expr
c_expr_sizeof_type (location_t loc, struct c_type_name *t)
{
  tree type;
  struct c_expr ret;
  tree type_expr = NULL_TREE;
  bool type_expr_const = true;
  type = groktypename (t, &type_expr, &type_expr_const);
  ret.value = c_sizeof (loc, type);
  /* Stash the operand for later diagnostics.  */
  c_last_sizeof_arg = type;
  c_last_sizeof_loc = loc;
  ret.original_code = SIZEOF_EXPR;
  ret.original_type = NULL;
  if ((type_expr || TREE_CODE (ret.value) == INTEGER_CST)
      && c_vla_type_p (type))
    {
      /* If the type is a [*] array, it is a VLA but is represented as
	 having a size of zero.  In such a case we must ensure that
	 the result of sizeof does not get folded to a constant by
	 c_fully_fold, because if the size is evaluated the result is
	 not constant and so constraints on zero or negative size
	 arrays must not be applied when this sizeof call is inside
	 another array declarator.  */
      if (!type_expr)
	type_expr = integer_zero_node;
      ret.value = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (ret.value),
			  type_expr, ret.value);
      C_MAYBE_CONST_EXPR_NON_CONST (ret.value) = !type_expr_const;
    }
  /* Deferred decls become used only if TYPE is variably sized.  */
  pop_maybe_used (type != error_mark_node
		  ? C_TYPE_VARIABLE_SIZE (type) : false);
  return ret;
}
/* Build a function call to function FUNCTION with parameters PARAMS.
   The function call is at LOC.
   PARAMS is a list--a chain of TREE_LIST nodes--in which the
   TREE_VALUE of each node is a parameter-expression.
   FUNCTION's data type may be a function type or a pointer-to-function.  */

tree
build_function_call (location_t loc, tree function, tree params)
{
  vec<tree, va_gc> *args;

  /* Flatten the TREE_LIST chain into an argument vector, then hand
     off to the vector-based entry point.  */
  vec_alloc (args, list_length (params));
  for (tree link = params; link; link = TREE_CHAIN (link))
    args->quick_push (TREE_VALUE (link));

  tree result = c_build_function_call_vec (loc, vNULL, function, args, NULL);
  vec_free (args);
  return result;
}
/* Give a note about the location of the declaration of DECL, unless
   DECL is null or a built-in function (which has no user-visible
   declaration to point at).  */

static void
inform_declaration (tree decl)
{
  if (!decl)
    return;
  if (TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
    return;
  inform (DECL_SOURCE_LOCATION (decl), "declared here");
}
/* Build a function call to function FUNCTION with parameters PARAMS.
   If FUNCTION is the result of resolving an overloaded target built-in,
   ORIG_FUNDECL is the original function decl, otherwise it is null.
   ORIGTYPES, if not NULL, is a vector of types; each element is
   either NULL or the original type of the corresponding element in
   PARAMS.  The original type may differ from TREE_TYPE of the
   parameter for enums.  FUNCTION's data type may be a function type
   or pointer-to-function.  This function changes the elements of
   PARAMS.  */

tree
build_function_call_vec (location_t loc, vec<location_t> arg_loc,
			 tree function, vec<tree, va_gc> *params,
			 vec<tree, va_gc> *origtypes, tree orig_fundecl)
{
  tree fntype, fundecl = NULL_TREE;
  tree name = NULL_TREE, result;
  tree tem;
  int nargs;
  tree *argarray;

  /* Strip NON_LVALUE_EXPRs, etc., since we aren't using as an lvalue.  */
  STRIP_TYPE_NOPS (function);

  /* Convert anything with function type to a pointer-to-function.  */
  if (TREE_CODE (function) == FUNCTION_DECL)
    {
      name = DECL_NAME (function);

      if (flag_tm)
	tm_malloc_replacement (function);
      fundecl = function;
      if (!orig_fundecl)
	orig_fundecl = fundecl;
      /* Atomic functions have type checking/casting already done.  They are
	 often rewritten and don't match the original parameter list.  */
      if (name && !strncmp (IDENTIFIER_POINTER (name), "__atomic_", 9))
	origtypes = NULL;
    }
  if (TREE_CODE (TREE_TYPE (function)) == FUNCTION_TYPE)
    function = function_to_pointer_conversion (loc, function);

  /* For Objective-C, convert any calls via a cast to OBJC_TYPE_REF
     expressions, like those used for ObjC messenger dispatches.  */
  if (params && !params->is_empty ())
    function = objc_rewrite_function_call (function, (*params)[0]);

  function = c_fully_fold (function, false, NULL);

  fntype = TREE_TYPE (function);

  if (TREE_CODE (fntype) == ERROR_MARK)
    return error_mark_node;

  if (!(TREE_CODE (fntype) == POINTER_TYPE
	&& TREE_CODE (TREE_TYPE (fntype)) == FUNCTION_TYPE))
    {
      if (!flag_diagnostics_show_caret)
	error_at (loc,
		  "called object %qE is not a function or function pointer",
		  function);
      else if (DECL_P (function))
	{
	  error_at (loc,
		    "called object %qD is not a function or function pointer",
		    function);
	  inform_declaration (function);
	}
      else
	error_at (loc,
		  "called object is not a function or function pointer");
      return error_mark_node;
    }

  /* TREE_THIS_VOLATILE on a FUNCTION_DECL means noreturn: control
     does not come back from this call.  */
  if (fundecl && TREE_THIS_VOLATILE (fundecl))
    current_function_returns_abnormally = 1;

  /* fntype now gets the type of function pointed to.  */
  fntype = TREE_TYPE (fntype);

  /* Convert the parameters to the types declared in the
     function prototype, or apply default promotions.  */

  nargs = convert_arguments (loc, arg_loc, TYPE_ARG_TYPES (fntype), params,
			     origtypes, function, fundecl);
  if (nargs < 0)
    return error_mark_node;

  /* Check that the function is called through a compatible prototype.
     If it is not, warn.  */
  if (CONVERT_EXPR_P (function)
      && TREE_CODE (tem = TREE_OPERAND (function, 0)) == ADDR_EXPR
      && TREE_CODE (tem = TREE_OPERAND (tem, 0)) == FUNCTION_DECL
      && !comptypes (fntype, TREE_TYPE (tem)))
    {
      tree return_type = TREE_TYPE (fntype);

      /* This situation leads to run-time undefined behavior.  We can't,
	 therefore, simply error unless we can prove that all possible
	 executions of the program must execute the code.  */
      warning_at (loc, 0, "function called through a non-compatible type");

      if (VOID_TYPE_P (return_type)
	  && TYPE_QUALS (return_type) != TYPE_UNQUALIFIED)
	pedwarn (loc, 0,
		 "function with qualified void return type called");
    }

  argarray = vec_safe_address (params);

  /* Check that arguments to builtin functions match the expectations.  */
  if (fundecl
      && fndecl_built_in_p (fundecl)
      && !check_builtin_function_arguments (loc, arg_loc, fundecl,
					    orig_fundecl, nargs, argarray))
    return error_mark_node;

  /* Check that the arguments to the function are valid.  */
  bool warned_p = check_function_arguments (loc, fundecl, fntype,
					    nargs, argarray, &arg_loc);

  if (name != NULL_TREE
      && !strncmp (IDENTIFIER_POINTER (name), "__builtin_", 10))
    {
      /* Fold __builtin_* calls eagerly so constant results are
	 available to the rest of the front end.  */
      if (require_constant_value)
	result
	  = fold_build_call_array_initializer_loc (loc, TREE_TYPE (fntype),
						   function, nargs, argarray);
      else
	result = fold_build_call_array_loc (loc, TREE_TYPE (fntype),
					    function, nargs, argarray);

      if (TREE_CODE (result) == NOP_EXPR
	  && TREE_CODE (TREE_OPERAND (result, 0)) == INTEGER_CST)
	STRIP_TYPE_NOPS (result);
    }
  else
    result = build_call_array_loc (loc, TREE_TYPE (fntype),
				   function, nargs, argarray);
  /* If -Wnonnull warning has been diagnosed, avoid diagnosing it again
     later.  */
  if (warned_p && TREE_CODE (result) == CALL_EXPR)
    TREE_NO_WARNING (result) = 1;

  /* In this improbable scenario, a nested function returns a VM type.
     Create a TARGET_EXPR so that the call always has a LHS, much as
     what the C++ FE does for functions returning non-PODs.  */
  if (variably_modified_type_p (TREE_TYPE (fntype), NULL_TREE))
    {
      tree tmp = create_tmp_var_raw (TREE_TYPE (fntype));
      result = build4 (TARGET_EXPR, TREE_TYPE (fntype), tmp, result,
		       NULL_TREE, NULL_TREE);
    }

  if (VOID_TYPE_P (TREE_TYPE (result)))
    {
      if (TYPE_QUALS (TREE_TYPE (result)) != TYPE_UNQUALIFIED)
	pedwarn (loc, 0,
		 "function with qualified void return type called");
      return result;
    }
  return require_complete_type (loc, result);
}
/* Like build_function_call_vec, but call also resolve_overloaded_builtin.  */

tree
c_build_function_call_vec (location_t loc, vec<location_t> arg_loc,
			   tree function, vec<tree, va_gc> *params,
			   vec<tree, va_gc> *origtypes)
{
  /* Strip NON_LVALUE_EXPRs, etc., since we aren't using as an lvalue.  */
  STRIP_TYPE_NOPS (function);

  if (TREE_CODE (function) == FUNCTION_DECL)
    {
      /* Implement type-directed function overloading for builtins.
	 resolve_overloaded_builtin and targetm.resolve_overloaded_builtin
	 handle all the type checking.  The result is a complete expression
	 that implements this function call.  */
      tree resolved = resolve_overloaded_builtin (loc, function, params);
      if (resolved != NULL_TREE)
	return resolved;
    }

  /* Not an overloaded builtin: build the call normally.  */
  return build_function_call_vec (loc, arg_loc, function, params, origtypes);
}
/* Helper for convert_arguments called to convert the VALue of argument
   number ARGNUM from ORIGTYPE to the corresponding parameter number
   PARMNUM and TYPE.
   PLOC is the location where the conversion is being performed.
   FUNCTION and FUNDECL are the same as in convert_arguments.
   VALTYPE is the original type of VAL before the conversion and,
   for EXCESS_PRECISION_EXPR, the operand of the expression.
   NPC is true if VAL represents the null pointer constant (VAL itself
   will have been folded to an integer constant).
   RNAME is the same as FUNCTION except in Objective C when it's
   the function selector.
   EXCESS_PRECISION is true when VAL was originally represented
   as EXCESS_PRECISION_EXPR.
   WARNOPT is the same as in convert_for_assignment.  */

static tree
convert_argument (location_t ploc, tree function, tree fundecl,
		  tree type, tree origtype, tree val, tree valtype,
		  bool npc, tree rname, int parmnum, int argnum,
		  bool excess_precision, int warnopt)
{
  /* Formal parm type is specified by a function prototype.  */

  if (type == error_mark_node || !COMPLETE_TYPE_P (type))
    {
      error_at (ploc, "type of formal parameter %d is incomplete",
		parmnum + 1);
      return val;
    }

  /* Optionally warn about conversions that differ from the default
     conversions.  */
  if (warn_traditional_conversion || warn_traditional)
    {
      unsigned int formal_prec = TYPE_PRECISION (type);

      if (INTEGRAL_TYPE_P (type)
	  && TREE_CODE (valtype) == REAL_TYPE)
	warning_at (ploc, OPT_Wtraditional_conversion,
		    "passing argument %d of %qE as integer rather "
		    "than floating due to prototype",
		    argnum, rname);
      if (INTEGRAL_TYPE_P (type)
	  && TREE_CODE (valtype) == COMPLEX_TYPE)
	warning_at (ploc, OPT_Wtraditional_conversion,
		    "passing argument %d of %qE as integer rather "
		    "than complex due to prototype",
		    argnum, rname);
      else if (TREE_CODE (type) == COMPLEX_TYPE
	       && TREE_CODE (valtype) == REAL_TYPE)
	warning_at (ploc, OPT_Wtraditional_conversion,
		    "passing argument %d of %qE as complex rather "
		    "than floating due to prototype",
		    argnum, rname);
      else if (TREE_CODE (type) == REAL_TYPE
	       && INTEGRAL_TYPE_P (valtype))
	warning_at (ploc, OPT_Wtraditional_conversion,
		    "passing argument %d of %qE as floating rather "
		    "than integer due to prototype",
		    argnum, rname);
      else if (TREE_CODE (type) == COMPLEX_TYPE
	       && INTEGRAL_TYPE_P (valtype))
	warning_at (ploc, OPT_Wtraditional_conversion,
		    "passing argument %d of %qE as complex rather "
		    "than integer due to prototype",
		    argnum, rname);
      else if (TREE_CODE (type) == REAL_TYPE
	       && TREE_CODE (valtype) == COMPLEX_TYPE)
	warning_at (ploc, OPT_Wtraditional_conversion,
		    "passing argument %d of %qE as floating rather "
		    "than complex due to prototype",
		    argnum, rname);
      /* ??? At some point, messages should be written about
	 conversions between complex types, but that's too messy
	 to do now.  */
      else if (TREE_CODE (type) == REAL_TYPE
	       && TREE_CODE (valtype) == REAL_TYPE)
	{
	  /* Warn if any argument is passed as `float',
	     since without a prototype it would be `double'.  */
	  if (formal_prec == TYPE_PRECISION (float_type_node)
	      && type != dfloat32_type_node)
	    warning_at (ploc, 0,
			"passing argument %d of %qE as %<float%> "
			"rather than %<double%> due to prototype",
			argnum, rname);

	  /* Warn if mismatch between argument and prototype
	     for decimal float types.  Warn of conversions with
	     binary float types and of precision narrowing due to
	     prototype.  */
	  else if (type != valtype
		   && (type == dfloat32_type_node
		       || type == dfloat64_type_node
		       || type == dfloat128_type_node
		       || valtype == dfloat32_type_node
		       || valtype == dfloat64_type_node
		       || valtype == dfloat128_type_node)
		   && (formal_prec
		       <= TYPE_PRECISION (valtype)
		       || (type == dfloat128_type_node
			   && (valtype
			       != dfloat64_type_node
			       && (valtype
				   != dfloat32_type_node)))
		       || (type == dfloat64_type_node
			   && (valtype
			       != dfloat32_type_node))))
	    warning_at (ploc, 0,
			"passing argument %d of %qE as %qT "
			"rather than %qT due to prototype",
			argnum, rname, type, valtype);
	}
      /* Detect integer changing in width or signedness.
	 These warnings are only activated with
	 -Wtraditional-conversion, not with -Wtraditional.  */
      else if (warn_traditional_conversion
	       && INTEGRAL_TYPE_P (type)
	       && INTEGRAL_TYPE_P (valtype))
	{
	  /* Compare against what default promotion (no prototype)
	     would have produced.  */
	  tree would_have_been = default_conversion (val);
	  tree type1 = TREE_TYPE (would_have_been);

	  if (val == error_mark_node)
	    /* VAL could have been of incomplete type.  */;
	  else if (TREE_CODE (type) == ENUMERAL_TYPE
		   && (TYPE_MAIN_VARIANT (type)
		       == TYPE_MAIN_VARIANT (valtype)))
	    /* No warning if function asks for enum
	       and the actual arg is that enum type.  */
	    ;
	  else if (formal_prec != TYPE_PRECISION (type1))
	    warning_at (ploc, OPT_Wtraditional_conversion,
			"passing argument %d of %qE "
			"with different width due to prototype",
			argnum, rname);
	  else if (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (type1))
	    ;
	  /* Don't complain if the formal parameter type
	     is an enum, because we can't tell now whether
	     the value was an enum--even the same enum.  */
	  else if (TREE_CODE (type) == ENUMERAL_TYPE)
	    ;
	  else if (TREE_CODE (val) == INTEGER_CST
		   && int_fits_type_p (val, type))
	    /* Change in signedness doesn't matter
	       if a constant value is unaffected.  */
	    ;
	  /* If the value is extended from a narrower
	     unsigned type, it doesn't matter whether we
	     pass it as signed or unsigned; the value
	     certainly is the same either way.  */
	  else if (TYPE_PRECISION (valtype) < TYPE_PRECISION (type)
		   && TYPE_UNSIGNED (valtype))
	    ;
	  else if (TYPE_UNSIGNED (type))
	    warning_at (ploc, OPT_Wtraditional_conversion,
			"passing argument %d of %qE "
			"as unsigned due to prototype",
			argnum, rname);
	  else
	    warning_at (ploc, OPT_Wtraditional_conversion,
			"passing argument %d of %qE "
			"as signed due to prototype",
			argnum, rname);
	}
    }

  /* Possibly restore an EXCESS_PRECISION_EXPR for the
     sake of better warnings from convert_and_check.  */
  if (excess_precision)
    val = build1 (EXCESS_PRECISION_EXPR, valtype, val);

  tree parmval = convert_for_assignment (ploc, ploc, type,
					 val, origtype, ic_argpass,
					 npc, fundecl, function,
					 parmnum + 1, warnopt);

  /* The ABI may still require promotion of small integer parameters
     even when a prototype is present.  */
  if (targetm.calls.promote_prototypes (fundecl ? TREE_TYPE (fundecl) : 0)
      && INTEGRAL_TYPE_P (type)
      && (TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)))
    parmval = default_conversion (parmval);

  return parmval;
}
/* Convert the argument expressions in the vector VALUES
   to the types in the list TYPELIST.
   If TYPELIST is exhausted, or when an element has NULL as its type,
   perform the default conversions.
   ORIGTYPES is the original types of the expressions in VALUES.  This
   holds the type of enum values which have been converted to integral
   types.  It may be NULL.
   FUNCTION is a tree for the called function.  It is used only for
   error messages, where it is formatted with %qE.
   This is also where warnings about wrong number of args are generated.
   ARG_LOC are locations of function arguments (if any).
   Returns the actual number of arguments processed (which may be less
   than the length of VALUES in some error situations), or -1 on
   failure.  */
static int
convert_arguments (location_t loc, vec<location_t> arg_loc, tree typelist,
                   vec<tree, va_gc> *values, vec<tree, va_gc> *origtypes,
                   tree function, tree fundecl)
{
  unsigned int parmnum;
  bool error_args = false;
  /* True if FUNDECL carries the "type generic" attribute (e.g. the
     <tgmath.h>-style classification/comparison built-ins).  */
  const bool type_generic = fundecl
    && lookup_attribute ("type generic", TYPE_ATTRIBUTES (TREE_TYPE (fundecl)));
  bool type_generic_remove_excess_precision = false;
  bool type_generic_overflow_p = false;
  tree selector;
  /* Change pointer to function to the function itself for
     diagnostics.  */
  if (TREE_CODE (function) == ADDR_EXPR
      && TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL)
    function = TREE_OPERAND (function, 0);
  /* Handle an ObjC selector specially for diagnostics.  */
  selector = objc_message_selector ();
  /* For a call to a built-in function declared without a prototype,
     set to the built-in function's argument list.  */
  tree builtin_typelist = NULL_TREE;
  /* For type-generic built-in functions, determine whether excess
     precision should be removed (classification) or not
     (comparison).  */
  if (fundecl
      && fndecl_built_in_p (fundecl, BUILT_IN_NORMAL))
    {
      built_in_function code = DECL_FUNCTION_CODE (fundecl);
      if (C_DECL_BUILTIN_PROTOTYPE (fundecl))
        {
          /* For a call to a built-in function declared without a prototype
             use the types of the parameters of the internal built-in to
             match those of the arguments to.  */
          if (tree bdecl = builtin_decl_explicit (code))
            builtin_typelist = TYPE_ARG_TYPES (TREE_TYPE (bdecl));
        }
      /* For type-generic built-in functions, determine whether excess
         precision should be removed (classification) or not
         (comparison).  */
      if (type_generic)
        switch (code)
          {
          case BUILT_IN_ISFINITE:
          case BUILT_IN_ISINF:
          case BUILT_IN_ISINF_SIGN:
          case BUILT_IN_ISNAN:
          case BUILT_IN_ISNORMAL:
          case BUILT_IN_FPCLASSIFY:
            type_generic_remove_excess_precision = true;
            break;
          case BUILT_IN_ADD_OVERFLOW_P:
          case BUILT_IN_SUB_OVERFLOW_P:
          case BUILT_IN_MUL_OVERFLOW_P:
            /* The last argument of these type-generic builtins
               should not be promoted.  */
            type_generic_overflow_p = true;
            break;
          default:
            break;
          }
    }
  /* Scan the given expressions (VALUES) and types (TYPELIST), producing
     individual converted arguments.  */
  tree typetail, builtin_typetail, val;
  for (typetail = typelist,
       builtin_typetail = builtin_typelist,
       parmnum = 0;
       values && values->iterate (parmnum, &val);
       ++parmnum)
    {
      /* The type of the function parameter (if it was declared with one).  */
      tree type = typetail ? TREE_VALUE (typetail) : NULL_TREE;
      /* The type of the built-in function parameter (if the function
         is a built-in).  Used to detect type incompatibilities in
         calls to built-ins declared without a prototype.  */
      tree builtin_type = (builtin_typetail
                           ? TREE_VALUE (builtin_typetail) : NULL_TREE);
      /* The original type of the argument being passed to the function.  */
      tree valtype = TREE_TYPE (val);
      /* The called function (or function selector in Objective C).  */
      tree rname = function;
      int argnum = parmnum + 1;
      const char *invalid_func_diag;
      /* Set for EXCESS_PRECISION_EXPR arguments.  */
      bool excess_precision = false;
      /* The value of the argument after conversion to the type
         of the function parameter it is passed to.  */
      tree parmval;
      /* Some __atomic_* builtins have additional hidden argument at
         position 0.  */
      /* Location of this argument; fall back to the call location when
         per-argument locations are absent or their count disagrees with
         the number of values.  */
      location_t ploc
        = !arg_loc.is_empty () && values->length () == arg_loc.length ()
          ? expansion_point_location_if_in_system_header (arg_loc[parmnum])
          : input_location;
      /* A void parameter type marks the end of the prototype's
         parameter list: any remaining value is an excess argument.  */
      if (type == void_type_node)
        {
          if (selector)
            error_at (loc, "too many arguments to method %qE", selector);
          else
            error_at (loc, "too many arguments to function %qE", function);
          inform_declaration (fundecl);
          return error_args ? -1 : (int) parmnum;
        }
      if (builtin_type == void_type_node)
        {
          if (warning_at (loc, OPT_Wbuiltin_declaration_mismatch,
                          "too many arguments to built-in function %qE "
                          "expecting %d", function, parmnum))
            inform_declaration (fundecl);
          builtin_typetail = NULL_TREE;
        }
      /* For ObjC messages, skip the two implicit arguments (receiver
         and selector) when numbering arguments in diagnostics.  */
      if (selector && argnum > 2)
        {
          rname = selector;
          argnum -= 2;
        }
      /* Determine if VAL is a null pointer constant before folding it.  */
      bool npc = null_pointer_constant_p (val);
      /* If there is excess precision and a prototype, convert once to
         the required type rather than converting via the semantic
         type.  Likewise without a prototype a float value represented
         as long double should be converted once to double.  But for
         type-generic classification functions excess precision must
         be removed here.  */
      if (TREE_CODE (val) == EXCESS_PRECISION_EXPR
          && (type || !type_generic || !type_generic_remove_excess_precision))
        {
          val = TREE_OPERAND (val, 0);
          excess_precision = true;
        }
      val = c_fully_fold (val, false, NULL);
      STRIP_TYPE_NOPS (val);
      val = require_complete_type (ploc, val);
      /* Some floating-point arguments must be promoted to double when
         no type is specified by a prototype.  This applies to
         arguments of type float, and to architecture-specific types
         (ARM __fp16), but not to _FloatN or _FloatNx types.  */
      bool promote_float_arg = false;
      if (type == NULL_TREE
          && TREE_CODE (valtype) == REAL_TYPE
          && (TYPE_PRECISION (valtype)
              <= TYPE_PRECISION (double_type_node))
          && TYPE_MAIN_VARIANT (valtype) != double_type_node
          && TYPE_MAIN_VARIANT (valtype) != long_double_type_node
          && !DECIMAL_FLOAT_MODE_P (TYPE_MODE (valtype)))
        {
          /* Promote this argument, unless it has a _FloatN or
             _FloatNx type.  */
          promote_float_arg = true;
          for (int i = 0; i < NUM_FLOATN_NX_TYPES; i++)
            if (TYPE_MAIN_VARIANT (valtype) == FLOATN_NX_TYPE_NODE (i))
              {
                promote_float_arg = false;
                break;
              }
        }
      if (type != NULL_TREE)
        {
          /* Prototyped parameter: convert to the declared type.  */
          tree origtype = (!origtypes) ? NULL_TREE : (*origtypes)[parmnum];
          parmval = convert_argument (ploc, function, fundecl, type, origtype,
                                      val, valtype, npc, rname, parmnum, argnum,
                                      excess_precision, 0);
        }
      else if (promote_float_arg)
        {
          if (type_generic)
            parmval = val;
          else
            {
              /* Convert `float' to `double'.  */
              if (warn_double_promotion && !c_inhibit_evaluation_warnings)
                warning_at (ploc, OPT_Wdouble_promotion,
                            "implicit conversion from %qT to %qT when passing "
                            "argument to function",
                            valtype, double_type_node);
              parmval = convert (double_type_node, val);
            }
        }
      else if ((excess_precision && !type_generic)
               || (type_generic_overflow_p && parmnum == 2))
        /* A "double" argument with excess precision being passed
           without a prototype or in variable arguments.
           The last argument of __builtin_*_overflow_p should not be
           promoted.  */
        parmval = convert (valtype, val);
      else if ((invalid_func_diag =
                targetm.calls.invalid_arg_for_unprototyped_fn (typelist, fundecl, val)))
        {
          /* Target rejects this argument for an unprototyped call.  */
          error (invalid_func_diag);
          return -1;
        }
      else if (TREE_CODE (val) == ADDR_EXPR && reject_gcc_builtin (val))
        {
          return -1;
        }
      else
        /* Convert `short' and `char' to full-size `int'.  */
        parmval = default_conversion (val);
      (*values)[parmnum] = parmval;
      if (parmval == error_mark_node)
        error_args = true;
      if (!type && builtin_type && TREE_CODE (builtin_type) != VOID_TYPE)
        {
          /* For a call to a built-in function declared without a prototype,
             perform the conversions from the argument to the expected type
             but issue warnings rather than errors for any mismatches.
             Ignore the converted argument and use the PARMVAL obtained
             above by applying default conversions instead.  */
          tree origtype = (!origtypes) ? NULL_TREE : (*origtypes)[parmnum];
          convert_argument (ploc, function, fundecl, builtin_type, origtype,
                            val, valtype, npc, rname, parmnum, argnum,
                            excess_precision,
                            OPT_Wbuiltin_declaration_mismatch);
        }
      if (typetail)
        typetail = TREE_CHAIN (typetail);
      if (builtin_typetail)
        builtin_typetail = TREE_CHAIN (builtin_typetail);
    }
  /* Every value in VALUES must have been visited exactly once.  */
  gcc_assert (parmnum == vec_safe_length (values));
  /* Leftover (non-void) prototype entries mean the call had too few
     arguments.  */
  if (typetail != NULL_TREE && TREE_VALUE (typetail) != void_type_node)
    {
      error_at (loc, "too few arguments to function %qE", function);
      inform_declaration (fundecl);
      return -1;
    }
  if (builtin_typetail && TREE_VALUE (builtin_typetail) != void_type_node)
    {
      /* Count the built-in's remaining expected parameters to report
         how many arguments it wanted (minus the terminating void).  */
      unsigned nargs = parmnum;
      for (tree t = builtin_typetail; t; t = TREE_CHAIN (t))
        ++nargs;
      if (warning_at (loc, OPT_Wbuiltin_declaration_mismatch,
                      "too few arguments to built-in function %qE "
                      "expecting %u", function, nargs - 1))
        inform_declaration (fundecl);
    }
  return error_args ? -1 : (int) parmnum;
}
/* This is the entry point used by the parser to build unary operators
   in the input.  CODE, a tree_code, specifies the unary operator, and
   ARG is the operand.  For unary plus, the C parser currently uses
   CONVERT_EXPR for code.
   LOC is the location to use for the tree generated.  */
struct c_expr
parser_build_unary_op (location_t loc, enum tree_code code, struct c_expr arg)
{
  struct c_expr ret;
  ret.original_code = code;
  ret.original_type = NULL;
  /* Taking a built-in function itself as an operand is rejected.  */
  if (reject_gcc_builtin (arg.value))
    ret.value = error_mark_node;
  else
    {
      ret.value = build_unary_op (loc, code, arg.value, false);
      /* Warn only when the overflow was introduced by this operation,
         not when the operand already overflowed.  */
      const bool fresh_overflow
        = TREE_OVERFLOW_P (ret.value) && !TREE_OVERFLOW_P (arg.value);
      if (fresh_overflow)
        overflow_warning (loc, ret.value, arg.value);
    }
  /* We are typically called when parsing a prefix token at LOC acting on
     ARG.  Reflect this by updating the source range of the result to
     start at LOC and end at the end of ARG.  */
  set_c_expr_source_range (&ret, loc, arg.get_finish ());
  return ret;
}
/* Returns true if TYPE is a character type, *not* including wchar_t.  */
static bool
char_type_p (tree type)
{
  /* The narrow character types plus char16_t/char32_t; wchar_t is
     deliberately absent.  */
  const tree char_nodes[] = {
    char_type_node,
    unsigned_char_type_node,
    signed_char_type_node,
    char16_type_node,
    char32_type_node
  };
  for (size_t i = 0; i < sizeof (char_nodes) / sizeof (char_nodes[0]); i++)
    if (type == char_nodes[i])
      return true;
  return false;
}
/* This is the entry point used by the parser to build binary operators
   in the input.  CODE, a tree_code, specifies the binary operator, and
   ARG1 and ARG2 are the operands.  In addition to constructing the
   expression, we check for operands that were written with other binary
   operators in a way that is likely to confuse the user.
   LOCATION is the location of the binary operator.  */
struct c_expr
parser_build_binary_op (location_t location, enum tree_code code,
                        struct c_expr arg1, struct c_expr arg2)
{
  struct c_expr result;
  enum tree_code code1 = arg1.original_code;
  enum tree_code code2 = arg2.original_code;
  /* Prefer the pre-conversion (e.g. enum) types recorded by the parser
     over the trees' current types for diagnostics below.  */
  tree type1 = (arg1.original_type
                ? arg1.original_type
                : TREE_TYPE (arg1.value));
  tree type2 = (arg2.original_type
                ? arg2.original_type
                : TREE_TYPE (arg2.value));
  result.value = build_binary_op (location, code,
                                  arg1.value, arg2.value, true);
  result.original_code = code;
  result.original_type = NULL;
  /* On error, still give the result a source range but skip all the
     warning analysis below.  */
  if (TREE_CODE (result.value) == ERROR_MARK)
    {
      set_c_expr_source_range (&result,
                               arg1.get_start (),
                               arg2.get_finish ());
      return result;
    }
  if (location != UNKNOWN_LOCATION)
    protected_set_expr_location (result.value, location);
  set_c_expr_source_range (&result,
                           arg1.get_start (),
                           arg2.get_finish ());
  /* Check for cases such as x+y<<z which users are likely
     to misinterpret.  */
  if (warn_parentheses)
    warn_about_parentheses (location, code, code1, arg1.value, code2,
                            arg2.value);
  if (warn_logical_op)
    warn_logical_operator (location, code, TREE_TYPE (result.value),
                           code1, arg1.value, code2, arg2.value);
  if (warn_tautological_compare)
    {
      tree lhs = arg1.value;
      tree rhs = arg2.value;
      /* Strip C_MAYBE_CONST_EXPR wrappers, but give up on an operand
         whose pre-evaluation part has side effects (comparing such
         operands is not tautological).  */
      if (TREE_CODE (lhs) == C_MAYBE_CONST_EXPR)
        {
          if (C_MAYBE_CONST_EXPR_PRE (lhs) != NULL_TREE
              && TREE_SIDE_EFFECTS (C_MAYBE_CONST_EXPR_PRE (lhs)))
            lhs = NULL_TREE;
          else
            lhs = C_MAYBE_CONST_EXPR_EXPR (lhs);
        }
      if (TREE_CODE (rhs) == C_MAYBE_CONST_EXPR)
        {
          if (C_MAYBE_CONST_EXPR_PRE (rhs) != NULL_TREE
              && TREE_SIDE_EFFECTS (C_MAYBE_CONST_EXPR_PRE (rhs)))
            rhs = NULL_TREE;
          else
            rhs = C_MAYBE_CONST_EXPR_EXPR (rhs);
        }
      if (lhs != NULL_TREE && rhs != NULL_TREE)
        warn_tautological_cmp (location, code, lhs, rhs);
    }
  /* Warn about !x == y and similar, which usually meant !(x == y).  */
  if (warn_logical_not_paren
      && TREE_CODE_CLASS (code) == tcc_comparison
      && code1 == TRUTH_NOT_EXPR
      && code2 != TRUTH_NOT_EXPR
      /* Avoid warning for !!x == y.  */
      && (TREE_CODE (arg1.value) != NE_EXPR
          || !integer_zerop (TREE_OPERAND (arg1.value, 1))))
    {
      /* Avoid warning for !b == y where b has _Bool type.  */
      tree t = integer_zero_node;
      if (TREE_CODE (arg1.value) == EQ_EXPR
          && integer_zerop (TREE_OPERAND (arg1.value, 1))
          && TREE_TYPE (TREE_OPERAND (arg1.value, 0)) == integer_type_node)
        {
          /* Peel conversions to find the original operand of the !.  */
          t = TREE_OPERAND (arg1.value, 0);
          do
            {
              if (TREE_TYPE (t) != integer_type_node)
                break;
              if (TREE_CODE (t) == C_MAYBE_CONST_EXPR)
                t = C_MAYBE_CONST_EXPR_EXPR (t);
              else if (CONVERT_EXPR_P (t))
                t = TREE_OPERAND (t, 0);
              else
                break;
            }
          while (1);
        }
      if (TREE_CODE (TREE_TYPE (t)) != BOOLEAN_TYPE)
        warn_logical_not_parentheses (location, code, arg1.value, arg2.value);
    }
  /* Warn about comparisons against string literals, with the exception
     of testing for equality or inequality of a string literal with NULL.  */
  if (code == EQ_EXPR || code == NE_EXPR)
    {
      if ((code1 == STRING_CST
           && !integer_zerop (tree_strip_nop_conversions (arg2.value)))
          || (code2 == STRING_CST
              && !integer_zerop (tree_strip_nop_conversions (arg1.value))))
        warning_at (location, OPT_Waddress,
                    "comparison with string literal results in unspecified behavior");
      /* Warn for ptr == '\0', it's likely that it should've been ptr[0].  */
      if (POINTER_TYPE_P (type1)
          && null_pointer_constant_p (arg2.value)
          && char_type_p (type2))
        {
          auto_diagnostic_group d;
          if (warning_at (location, OPT_Wpointer_compare,
                          "comparison between pointer and zero character "
                          "constant"))
            inform (arg1.get_start (),
                    "did you mean to dereference the pointer?");
        }
      else if (POINTER_TYPE_P (type2)
               && null_pointer_constant_p (arg1.value)
               && char_type_p (type1))
        {
          auto_diagnostic_group d;
          if (warning_at (location, OPT_Wpointer_compare,
                          "comparison between pointer and zero character "
                          "constant"))
            inform (arg2.get_start (),
                    "did you mean to dereference the pointer?");
        }
    }
  else if (TREE_CODE_CLASS (code) == tcc_comparison
           && (code1 == STRING_CST || code2 == STRING_CST))
    warning_at (location, OPT_Waddress,
                "comparison with string literal results in unspecified behavior");
  /* Warn only when overflow was introduced by this operation, not when
     an operand was already overflowed.  */
  if (TREE_OVERFLOW_P (result.value)
      && !TREE_OVERFLOW_P (arg1.value)
      && !TREE_OVERFLOW_P (arg2.value))
    overflow_warning (location, result.value);
  /* Warn about comparisons of different enum types.  */
  if (warn_enum_compare
      && TREE_CODE_CLASS (code) == tcc_comparison
      && TREE_CODE (type1) == ENUMERAL_TYPE
      && TREE_CODE (type2) == ENUMERAL_TYPE
      && TYPE_MAIN_VARIANT (type1) != TYPE_MAIN_VARIANT (type2))
    warning_at (location, OPT_Wenum_compare,
                "comparison between %qT and %qT",
                type1, type2);
  return result;
}
/* Return a tree for the difference of pointers OP0 and OP1.
   The resulting tree has type ptrdiff_t.  If POINTER_SUBTRACT sanitization is
   enabled, assign to INSTRUMENT_EXPR call to libsanitizer.  */
static tree
pointer_diff (location_t loc, tree op0, tree op1, tree *instrument_expr)
{
  tree restype = ptrdiff_type_node;
  tree result, inttype;
  addr_space_t as0 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (op0)));
  addr_space_t as1 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (op1)));
  /* The pointed-to type, used for the pedantic warnings and the final
     division by element size.  */
  tree target_type = TREE_TYPE (TREE_TYPE (op0));
  tree orig_op0 = op0;
  tree orig_op1 = op1;
  /* If the operands point into different address spaces, we need to
     explicitly convert them to pointers into the common address space
     before we can subtract the numerical address values.  */
  if (as0 != as1)
    {
      addr_space_t as_common;
      tree common_type;
      /* Determine the common superset address space.  This is guaranteed
         to exist because the caller verified that comp_target_types
         returned non-zero.  */
      if (!addr_space_superset (as0, as1, &as_common))
        gcc_unreachable ();
      common_type = common_pointer_type (TREE_TYPE (op0), TREE_TYPE (op1));
      op0 = convert (common_type, op0);
      op1 = convert (common_type, op1);
    }
  /* Determine integer type result of the subtraction.  This will usually
     be the same as the result type (ptrdiff_t), but may need to be a wider
     type if pointers for the address space are wider than ptrdiff_t.  */
  if (TYPE_PRECISION (restype) < TYPE_PRECISION (TREE_TYPE (op0)))
    inttype = c_common_type_for_size (TYPE_PRECISION (TREE_TYPE (op0)), 0);
  else
    inttype = restype;
  if (TREE_CODE (target_type) == VOID_TYPE)
    pedwarn (loc, OPT_Wpointer_arith,
             "pointer of type %<void *%> used in subtraction");
  if (TREE_CODE (target_type) == FUNCTION_TYPE)
    pedwarn (loc, OPT_Wpointer_arith,
             "pointer to a function used in subtraction");
  if (sanitize_flags_p (SANITIZE_POINTER_SUBTRACT))
    {
      gcc_assert (current_function_decl != NULL_TREE);
      /* Stabilize the operands so the sanitizer call and the actual
         subtraction see the same values.  */
      op0 = save_expr (op0);
      op1 = save_expr (op1);
      tree tt = builtin_decl_explicit (BUILT_IN_ASAN_POINTER_SUBTRACT);
      *instrument_expr = build_call_expr_loc (loc, tt, 2, op0, op1);
    }
  /* First do the subtraction, then build the divide operator
     and only convert at the very end.
     Do not do default conversions in case restype is a short type.  */
  /* POINTER_DIFF_EXPR requires a signed integer type of the same size as
     pointers.  If some platform cannot provide that, or has a larger
     ptrdiff_type to support differences larger than half the address
     space, cast the pointers to some larger integer type and do the
     computations in that type.  */
  if (TYPE_PRECISION (inttype) > TYPE_PRECISION (TREE_TYPE (op0)))
    op0 = build_binary_op (loc, MINUS_EXPR, convert (inttype, op0),
                           convert (inttype, op1), false);
  else
    {
      /* Cast away qualifiers.  */
      op0 = convert (c_common_type (TREE_TYPE (op0), TREE_TYPE (op0)), op0);
      op1 = convert (c_common_type (TREE_TYPE (op1), TREE_TYPE (op1)), op1);
      op0 = build2_loc (loc, POINTER_DIFF_EXPR, inttype, op0, op1);
    }
  /* This generates an error if op1 is pointer to incomplete type.  */
  if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (orig_op1))))
    error_at (loc, "arithmetic on pointer to an incomplete type");
  else if (verify_type_context (loc, TCTX_POINTER_ARITH,
                                TREE_TYPE (TREE_TYPE (orig_op0))))
    verify_type_context (loc, TCTX_POINTER_ARITH,
                         TREE_TYPE (TREE_TYPE (orig_op1)));
  /* OP1 is reused to hold the element size from here on.  */
  op1 = c_size_in_bytes (target_type);
  if (pointer_to_zero_sized_aggr_p (TREE_TYPE (orig_op1)))
    error_at (loc, "arithmetic on pointer to an empty aggregate");
  /* Divide by the size, in easiest possible way.  */
  result = fold_build2_loc (loc, EXACT_DIV_EXPR, inttype,
                            op0, convert (inttype, op1));
  /* Convert to final result type if necessary.  */
  return convert (restype, result);
}
/* Expand atomic compound assignments into an appropriate sequence as
   specified by the C11 standard section 6.5.16.2.
   _Atomic T1 E1
   T2 E2
   E1 op= E2
   This sequence is used for all types for which these operations are
   supported.
   In addition, built-in versions of the 'fe' prefixed routines may
   need to be invoked for floating point (real, complex or vector) when
   floating-point exceptions are supported.  See 6.5.16.2 footnote 113.
   T1 newval;
   T1 old;
   T1 *addr
   T2 val
   fenv_t fenv
   addr = &E1;
   val = (E2);
   __atomic_load (addr, &old, SEQ_CST);
   feholdexcept (&fenv);
   loop:
     newval = old op val;
     if (__atomic_compare_exchange_strong (addr, &old, &newval, SEQ_CST,
					   SEQ_CST))
       goto done;
     feclearexcept (FE_ALL_EXCEPT);
     goto loop:
   done:
   feupdateenv (&fenv);
   The compiler will issue the __atomic_fetch_* built-in when possible,
   otherwise it will generate the generic form of the atomic operations.
   This requires temp(s) and has their address taken.  The atomic processing
   is smart enough to figure out when the size of an object can utilize
   a lock-free version, and convert the built-in call to the appropriate
   lock-free routine.  The optimizers will then dispose of any temps that
   are no longer required, and lock-free implementations are utilized as
   long as there is target support for the required size.
   If the operator is NOP_EXPR, then this is a simple assignment, and
   an __atomic_store is issued to perform the assignment rather than
   the above loop.  */
/* Build an atomic assignment at LOC, expanding into the proper
   sequence to store LHS MODIFYCODE= RHS.  Return a value representing
   the result of the operation, unless RETURN_OLD_P, in which case
   return the old value of LHS (this is only for postincrement and
   postdecrement).  */
static tree
build_atomic_assign (location_t loc, tree lhs, enum tree_code modifycode,
                     tree rhs, bool return_old_p)
{
  tree fndecl, func_call;
  vec<tree, va_gc> *params;
  tree val, nonatomic_lhs_type, nonatomic_rhs_type, newval, newval_addr;
  tree old, old_addr;
  tree compound_stmt = NULL_TREE;
  tree stmt, goto_stmt;
  tree loop_label, loop_decl, done_label, done_decl;
  tree lhs_type = TREE_TYPE (lhs);
  tree lhs_addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
  tree seq_cst = build_int_cst (integer_type_node, MEMMODEL_SEQ_CST);
  tree rhs_semantic_type = TREE_TYPE (rhs);
  tree nonatomic_rhs_semantic_type;
  tree rhs_type;
  gcc_assert (TYPE_ATOMIC (lhs_type));
  /* RETURN_OLD_P is only used for post-increment/decrement, which are
     expanded as += 1 / -= 1.  */
  if (return_old_p)
    gcc_assert (modifycode == PLUS_EXPR || modifycode == MINUS_EXPR);
  /* Allocate enough vector items for a compare_exchange.  */
  vec_alloc (params, 6);
  /* Create a compound statement to hold the sequence of statements
     with a loop.  */
  if (modifycode != NOP_EXPR)
    {
      compound_stmt = c_begin_compound_stmt (false);
      /* For consistency with build_modify_expr on non-_Atomic,
	 mark the lhs as read.  Also, it would be very hard to match
	 such expressions in mark_exp_read.  */
      mark_exp_read (lhs);
    }
  /* Remove any excess precision (which is only present here in the
     case of compound assignments).  */
  if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR)
    {
      gcc_assert (modifycode != NOP_EXPR);
      rhs = TREE_OPERAND (rhs, 0);
    }
  rhs_type = TREE_TYPE (rhs);
  /* Fold the RHS if it hasn't already been folded.  */
  if (modifycode != NOP_EXPR)
    rhs = c_fully_fold (rhs, false, NULL);
  /* Remove the qualifiers for the rest of the expressions and create
     the VAL temp variable to hold the RHS.  */
  nonatomic_lhs_type = build_qualified_type (lhs_type, TYPE_UNQUALIFIED);
  nonatomic_rhs_type = build_qualified_type (rhs_type, TYPE_UNQUALIFIED);
  nonatomic_rhs_semantic_type = build_qualified_type (rhs_semantic_type,
                                                      TYPE_UNQUALIFIED);
  val = create_tmp_var_raw (nonatomic_rhs_type);
  TREE_ADDRESSABLE (val) = 1;
  TREE_NO_WARNING (val) = 1;
  /* TARGET_EXPR initializes VAL with the (evaluated-once) RHS.  */
  rhs = build4 (TARGET_EXPR, nonatomic_rhs_type, val, rhs, NULL_TREE,
                NULL_TREE);
  TREE_SIDE_EFFECTS (rhs) = 1;
  SET_EXPR_LOCATION (rhs, loc);
  if (modifycode != NOP_EXPR)
    add_stmt (rhs);
  /* NOP_EXPR indicates it's a straight store of the RHS.  Simply issue
     an atomic_store.  */
  if (modifycode == NOP_EXPR)
    {
      compound_stmt = rhs;
      /* Build __atomic_store (&lhs, &val, SEQ_CST)  */
      rhs = build_unary_op (loc, ADDR_EXPR, val, false);
      fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_STORE);
      params->quick_push (lhs_addr);
      params->quick_push (rhs);
      params->quick_push (seq_cst);
      func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL);
      compound_stmt = build2 (COMPOUND_EXPR, void_type_node,
                              compound_stmt, func_call);
      /* VAL is the value which was stored, return a COMPOUND_STMT of
	 the statement and that value.  */
      return build2 (COMPOUND_EXPR, nonatomic_lhs_type, compound_stmt, val);
    }
  /* Attempt to implement the atomic operation as an __atomic_fetch_* or
     __atomic_*_fetch built-in rather than a CAS loop.  atomic_bool type
     isn't applicable for such builtins.  ??? Do we want to handle enums?  */
  if ((TREE_CODE (lhs_type) == INTEGER_TYPE || POINTER_TYPE_P (lhs_type))
      && TREE_CODE (rhs_type) == INTEGER_TYPE)
    {
      built_in_function fncode;
      /* Map the operator to the matching fetch built-in; RETURN_OLD_P
	 selects the fetch-then-op variant (old value returned).  */
      switch (modifycode)
	{
	case PLUS_EXPR:
	case POINTER_PLUS_EXPR:
	  fncode = (return_old_p
		    ? BUILT_IN_ATOMIC_FETCH_ADD_N
		    : BUILT_IN_ATOMIC_ADD_FETCH_N);
	  break;
	case MINUS_EXPR:
	  fncode = (return_old_p
		    ? BUILT_IN_ATOMIC_FETCH_SUB_N
		    : BUILT_IN_ATOMIC_SUB_FETCH_N);
	  break;
	case BIT_AND_EXPR:
	  fncode = (return_old_p
		    ? BUILT_IN_ATOMIC_FETCH_AND_N
		    : BUILT_IN_ATOMIC_AND_FETCH_N);
	  break;
	case BIT_IOR_EXPR:
	  fncode = (return_old_p
		    ? BUILT_IN_ATOMIC_FETCH_OR_N
		    : BUILT_IN_ATOMIC_OR_FETCH_N);
	  break;
	case BIT_XOR_EXPR:
	  fncode = (return_old_p
		    ? BUILT_IN_ATOMIC_FETCH_XOR_N
		    : BUILT_IN_ATOMIC_XOR_FETCH_N);
	  break;
	default:
	  goto cas_loop;
	}
      /* We can only use "_1" through "_16" variants of the atomic fetch
	 built-ins.  */
      unsigned HOST_WIDE_INT size = tree_to_uhwi (TYPE_SIZE_UNIT (lhs_type));
      if (size != 1 && size != 2 && size != 4 && size != 8 && size != 16)
	goto cas_loop;
      /* If this is a pointer type, we need to multiply by the size of
	 the pointer target type.  */
      if (POINTER_TYPE_P (lhs_type))
	{
	  if (!COMPLETE_TYPE_P (TREE_TYPE (lhs_type))
	      /* ??? This would introduce -Wdiscarded-qualifiers
		 warning: __atomic_fetch_* expect volatile void *
		 type as the first argument.  (Assignments between
		 atomic and non-atomic objects are OK.) */
	      || TYPE_RESTRICT (lhs_type))
	    goto cas_loop;
	  tree sz = TYPE_SIZE_UNIT (TREE_TYPE (lhs_type));
	  rhs = fold_build2_loc (loc, MULT_EXPR, ptrdiff_type_node,
				 convert (ptrdiff_type_node, rhs),
				 convert (ptrdiff_type_node, sz));
	}
      /* Build __atomic_fetch_* (&lhs, &val, SEQ_CST), or
	 __atomic_*_fetch (&lhs, &val, SEQ_CST).  */
      fndecl = builtin_decl_explicit (fncode);
      params->quick_push (lhs_addr);
      params->quick_push (rhs);
      params->quick_push (seq_cst);
      func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL);
      newval = create_tmp_var_raw (nonatomic_lhs_type);
      TREE_ADDRESSABLE (newval) = 1;
      TREE_NO_WARNING (newval) = 1;
      rhs = build4 (TARGET_EXPR, nonatomic_lhs_type, newval, func_call,
		    NULL_TREE, NULL_TREE);
      SET_EXPR_LOCATION (rhs, loc);
      add_stmt (rhs);
      /* Finish the compound statement.  */
      compound_stmt = c_end_compound_stmt (loc, compound_stmt, false);
      /* NEWVAL is the value which was stored, return a COMPOUND_STMT of
	 the statement and that value.  */
      return build2 (COMPOUND_EXPR, nonatomic_lhs_type, compound_stmt, newval);
    }
cas_loop:
  /* Create the variables and labels required for the op= form.  */
  old = create_tmp_var_raw (nonatomic_lhs_type);
  old_addr = build_unary_op (loc, ADDR_EXPR, old, false);
  TREE_ADDRESSABLE (old) = 1;
  TREE_NO_WARNING (old) = 1;
  newval = create_tmp_var_raw (nonatomic_lhs_type);
  newval_addr = build_unary_op (loc, ADDR_EXPR, newval, false);
  TREE_ADDRESSABLE (newval) = 1;
  TREE_NO_WARNING (newval) = 1;
  loop_decl = create_artificial_label (loc);
  loop_label = build1 (LABEL_EXPR, void_type_node, loop_decl);
  done_decl = create_artificial_label (loc);
  done_label = build1 (LABEL_EXPR, void_type_node, done_decl);
  /* __atomic_load (addr, &old, SEQ_CST).  */
  fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_LOAD);
  params->quick_push (lhs_addr);
  params->quick_push (old_addr);
  params->quick_push (seq_cst);
  func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL);
  old = build4 (TARGET_EXPR, nonatomic_lhs_type, old, func_call, NULL_TREE,
                NULL_TREE);
  add_stmt (old);
  /* PARAMS is reused for the compare-exchange call below.  */
  params->truncate (0);
  /* Create the expressions for floating-point environment
     manipulation, if required.  */
  bool need_fenv = (flag_trapping_math
                    && (FLOAT_TYPE_P (lhs_type) || FLOAT_TYPE_P (rhs_type)));
  tree hold_call = NULL_TREE, clear_call = NULL_TREE, update_call = NULL_TREE;
  if (need_fenv)
    targetm.atomic_assign_expand_fenv (&hold_call, &clear_call, &update_call);
  if (hold_call)
    add_stmt (hold_call);
  /* loop:  */
  add_stmt (loop_label);
  /* newval = old + val;  */
  if (rhs_type != rhs_semantic_type)
    val = build1 (EXCESS_PRECISION_EXPR, nonatomic_rhs_semantic_type, val);
  rhs = build_binary_op (loc, modifycode, old, val, true);
  if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR)
    {
      /* Fold inside the excess-precision wrapper, then re-wrap.  */
      tree eptype = TREE_TYPE (rhs);
      rhs = c_fully_fold (TREE_OPERAND (rhs, 0), false, NULL);
      rhs = build1 (EXCESS_PRECISION_EXPR, eptype, rhs);
    }
  else
    rhs = c_fully_fold (rhs, false, NULL);
  rhs = convert_for_assignment (loc, UNKNOWN_LOCATION, nonatomic_lhs_type,
                                rhs, NULL_TREE, ic_assign, false, NULL_TREE,
                                NULL_TREE, 0);
  if (rhs != error_mark_node)
    {
      rhs = build4 (TARGET_EXPR, nonatomic_lhs_type, newval, rhs, NULL_TREE,
                    NULL_TREE);
      SET_EXPR_LOCATION (rhs, loc);
      add_stmt (rhs);
    }
  /* if (__atomic_compare_exchange (addr, &old, &new, false, SEQ_CST, SEQ_CST))
       goto done;  */
  fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_COMPARE_EXCHANGE);
  params->quick_push (lhs_addr);
  params->quick_push (old_addr);
  params->quick_push (newval_addr);
  params->quick_push (integer_zero_node);
  params->quick_push (seq_cst);
  params->quick_push (seq_cst);
  func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL);
  goto_stmt = build1 (GOTO_EXPR, void_type_node, done_decl);
  SET_EXPR_LOCATION (goto_stmt, loc);
  stmt = build3 (COND_EXPR, void_type_node, func_call, goto_stmt, NULL_TREE);
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);
  if (clear_call)
    add_stmt (clear_call);
  /* goto loop;  */
  goto_stmt = build1 (GOTO_EXPR, void_type_node, loop_decl);
  SET_EXPR_LOCATION (goto_stmt, loc);
  add_stmt (goto_stmt);
  /* done:  */
  add_stmt (done_label);
  if (update_call)
    add_stmt (update_call);
  /* Finish the compound statement.  */
  compound_stmt = c_end_compound_stmt (loc, compound_stmt, false);
  /* NEWVAL is the value that was successfully stored, return a
     COMPOUND_EXPR of the statement and the appropriate value.  */
  return build2 (COMPOUND_EXPR, nonatomic_lhs_type, compound_stmt,
                 return_old_p ? old : newval);
}
/* Construct and perhaps optimize a tree representation
for a unary operation. CODE, a tree_code, specifies the operation
and XARG is the operand.
For any CODE other than ADDR_EXPR, NOCONVERT suppresses the default
promotions (such as from short to int).
For ADDR_EXPR, the default promotions are not applied; NOCONVERT allows
non-lvalues; this is only used to handle conversion of non-lvalue arrays
to pointers in C99.
LOCATION is the location of the operator. */
tree
build_unary_op (location_t location, enum tree_code code, tree xarg,
bool noconvert)
{
/* No default_conversion here. It causes trouble for ADDR_EXPR. */
tree arg = xarg;
tree argtype = NULL_TREE;
enum tree_code typecode;
tree val;
tree ret = error_mark_node;
tree eptype = NULL_TREE;
const char *invalid_op_diag;
bool int_operands;
int_operands = EXPR_INT_CONST_OPERANDS (xarg);
if (int_operands)
arg = remove_c_maybe_const_expr (arg);
if (code != ADDR_EXPR)
arg = require_complete_type (location, arg);
typecode = TREE_CODE (TREE_TYPE (arg));
if (typecode == ERROR_MARK)
return error_mark_node;
if (typecode == ENUMERAL_TYPE || typecode == BOOLEAN_TYPE)
typecode = INTEGER_TYPE;
if ((invalid_op_diag
= targetm.invalid_unary_op (code, TREE_TYPE (xarg))))
{
error_at (location, invalid_op_diag);
return error_mark_node;
}
if (TREE_CODE (arg) == EXCESS_PRECISION_EXPR)
{
eptype = TREE_TYPE (arg);
arg = TREE_OPERAND (arg, 0);
}
switch (code)
{
case CONVERT_EXPR:
/* This is used for unary plus, because a CONVERT_EXPR
is enough to prevent anybody from looking inside for
associativity, but won't generate any code. */
if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
|| typecode == FIXED_POINT_TYPE || typecode == COMPLEX_TYPE
|| gnu_vector_type_p (TREE_TYPE (arg))))
{
error_at (location, "wrong type argument to unary plus");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
arg = non_lvalue_loc (location, arg);
break;
case NEGATE_EXPR:
if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
|| typecode == FIXED_POINT_TYPE || typecode == COMPLEX_TYPE
|| gnu_vector_type_p (TREE_TYPE (arg))))
{
error_at (location, "wrong type argument to unary minus");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
break;
case BIT_NOT_EXPR:
/* ~ works on integer types and non float vectors. */
if (typecode == INTEGER_TYPE
|| (gnu_vector_type_p (TREE_TYPE (arg))
&& !VECTOR_FLOAT_TYPE_P (TREE_TYPE (arg))))
{
tree e = arg;
/* Warn if the expression has boolean value. */
while (TREE_CODE (e) == COMPOUND_EXPR)
e = TREE_OPERAND (e, 1);
if ((TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE
|| truth_value_p (TREE_CODE (e))))
{
auto_diagnostic_group d;
if (warning_at (location, OPT_Wbool_operation,
"%<~%> on a boolean expression"))
{
gcc_rich_location richloc (location);
richloc.add_fixit_insert_before (location, "!");
inform (&richloc, "did you mean to use logical not?");
}
}
if (!noconvert)
arg = default_conversion (arg);
}
else if (typecode == COMPLEX_TYPE)
{
code = CONJ_EXPR;
pedwarn (location, OPT_Wpedantic,
"ISO C does not support %<~%> for complex conjugation");
if (!noconvert)
arg = default_conversion (arg);
}
else
{
error_at (location, "wrong type argument to bit-complement");
return error_mark_node;
}
break;
case ABS_EXPR:
if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE))
{
error_at (location, "wrong type argument to abs");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
break;
case ABSU_EXPR:
if (!(typecode == INTEGER_TYPE))
{
error_at (location, "wrong type argument to absu");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
break;
case CONJ_EXPR:
/* Conjugating a real value is a no-op, but allow it anyway. */
if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
|| typecode == COMPLEX_TYPE))
{
error_at (location, "wrong type argument to conjugation");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
break;
case TRUTH_NOT_EXPR:
if (typecode != INTEGER_TYPE && typecode != FIXED_POINT_TYPE
&& typecode != REAL_TYPE && typecode != POINTER_TYPE
&& typecode != COMPLEX_TYPE)
{
error_at (location,
"wrong type argument to unary exclamation mark");
return error_mark_node;
}
if (int_operands)
{
arg = c_objc_common_truthvalue_conversion (location, xarg);
arg = remove_c_maybe_const_expr (arg);
}
else
arg = c_objc_common_truthvalue_conversion (location, arg);
ret = invert_truthvalue_loc (location, arg);
/* If the TRUTH_NOT_EXPR has been folded, reset the location. */
if (EXPR_P (ret) && EXPR_HAS_LOCATION (ret))
location = EXPR_LOCATION (ret);
goto return_build_unary_op;
case REALPART_EXPR:
case IMAGPART_EXPR:
ret = build_real_imag_expr (location, code, arg);
if (ret == error_mark_node)
return error_mark_node;
if (eptype && TREE_CODE (eptype) == COMPLEX_TYPE)
eptype = TREE_TYPE (eptype);
goto return_build_unary_op;
case PREINCREMENT_EXPR:
case POSTINCREMENT_EXPR:
case PREDECREMENT_EXPR:
case POSTDECREMENT_EXPR:
if (TREE_CODE (arg) == C_MAYBE_CONST_EXPR)
{
tree inner = build_unary_op (location, code,
C_MAYBE_CONST_EXPR_EXPR (arg),
noconvert);
if (inner == error_mark_node)
return error_mark_node;
ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (inner),
C_MAYBE_CONST_EXPR_PRE (arg), inner);
gcc_assert (!C_MAYBE_CONST_EXPR_INT_OPERANDS (arg));
C_MAYBE_CONST_EXPR_NON_CONST (ret) = 1;
goto return_build_unary_op;
}
/* Complain about anything that is not a true lvalue. In
Objective-C, skip this check for property_refs. */
if (!objc_is_property_ref (arg)
&& !lvalue_or_else (location,
arg, ((code == PREINCREMENT_EXPR
|| code == POSTINCREMENT_EXPR)
? lv_increment
: lv_decrement)))
return error_mark_node;
if (warn_cxx_compat && TREE_CODE (TREE_TYPE (arg)) == ENUMERAL_TYPE)
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
warning_at (location, OPT_Wc___compat,
"increment of enumeration value is invalid in C++");
else
warning_at (location, OPT_Wc___compat,
"decrement of enumeration value is invalid in C++");
}
if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE)
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
warning_at (location, OPT_Wbool_operation,
"increment of a boolean expression");
else
warning_at (location, OPT_Wbool_operation,
"decrement of a boolean expression");
}
/* Ensure the argument is fully folded inside any SAVE_EXPR. */
arg = c_fully_fold (arg, false, NULL, true);
bool atomic_op;
atomic_op = really_atomic_lvalue (arg);
/* Increment or decrement the real part of the value,
and don't change the imaginary part. */
if (typecode == COMPLEX_TYPE)
{
tree real, imag;
pedwarn (location, OPT_Wpedantic,
"ISO C does not support %<++%> and %<--%> on complex types");
if (!atomic_op)
{
arg = stabilize_reference (arg);
real = build_unary_op (EXPR_LOCATION (arg), REALPART_EXPR, arg,
true);
imag = build_unary_op (EXPR_LOCATION (arg), IMAGPART_EXPR, arg,
true);
real = build_unary_op (EXPR_LOCATION (arg), code, real, true);
if (real == error_mark_node || imag == error_mark_node)
return error_mark_node;
ret = build2 (COMPLEX_EXPR, TREE_TYPE (arg),
real, imag);
goto return_build_unary_op;
}
}
/* Report invalid types. */
if (typecode != POINTER_TYPE && typecode != FIXED_POINT_TYPE
&& typecode != INTEGER_TYPE && typecode != REAL_TYPE
&& typecode != COMPLEX_TYPE
&& !gnu_vector_type_p (TREE_TYPE (arg)))
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
error_at (location, "wrong type argument to increment");
else
error_at (location, "wrong type argument to decrement");
return error_mark_node;
}
{
tree inc;
argtype = TREE_TYPE (arg);
/* Compute the increment. */
if (typecode == POINTER_TYPE)
{
/* If pointer target is an incomplete type,
we just cannot know how to do the arithmetic. */
if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (argtype)))
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
error_at (location,
"increment of pointer to an incomplete type %qT",
TREE_TYPE (argtype));
else
error_at (location,
"decrement of pointer to an incomplete type %qT",
TREE_TYPE (argtype));
}
else if (TREE_CODE (TREE_TYPE (argtype)) == FUNCTION_TYPE
|| TREE_CODE (TREE_TYPE (argtype)) == VOID_TYPE)
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
pedwarn (location, OPT_Wpointer_arith,
"wrong type argument to increment");
else
pedwarn (location, OPT_Wpointer_arith,
"wrong type argument to decrement");
}
else
verify_type_context (location, TCTX_POINTER_ARITH,
TREE_TYPE (argtype));
inc = c_size_in_bytes (TREE_TYPE (argtype));
inc = convert_to_ptrofftype_loc (location, inc);
}
else if (FRACT_MODE_P (TYPE_MODE (argtype)))
{
/* For signed fract types, we invert ++ to -- or
-- to ++, and change inc from 1 to -1, because
it is not possible to represent 1 in signed fract constants.
For unsigned fract types, the result always overflows and
we get an undefined (original) or the maximum value. */
if (code == PREINCREMENT_EXPR)
code = PREDECREMENT_EXPR;
else if (code == PREDECREMENT_EXPR)
code = PREINCREMENT_EXPR;
else if (code == POSTINCREMENT_EXPR)
code = POSTDECREMENT_EXPR;
else /* code == POSTDECREMENT_EXPR */
code = POSTINCREMENT_EXPR;
inc = integer_minus_one_node;
inc = convert (argtype, inc);
}
else
{
inc = VECTOR_TYPE_P (argtype)
? build_one_cst (argtype)
: integer_one_node;
inc = convert (argtype, inc);
}
/* If 'arg' is an Objective-C PROPERTY_REF expression, then we
need to ask Objective-C to build the increment or decrement
expression for it. */
if (objc_is_property_ref (arg))
return objc_build_incr_expr_for_property_ref (location, code,
arg, inc);
/* Report a read-only lvalue. */
if (TYPE_READONLY (argtype))
{
readonly_error (location, arg,
((code == PREINCREMENT_EXPR
|| code == POSTINCREMENT_EXPR)
? lv_increment : lv_decrement));
return error_mark_node;
}
else if (TREE_READONLY (arg))
readonly_warning (arg,
((code == PREINCREMENT_EXPR
|| code == POSTINCREMENT_EXPR)
? lv_increment : lv_decrement));
/* If the argument is atomic, use the special code sequences for
atomic compound assignment. */
if (atomic_op)
{
arg = stabilize_reference (arg);
ret = build_atomic_assign (location, arg,
((code == PREINCREMENT_EXPR
|| code == POSTINCREMENT_EXPR)
? PLUS_EXPR
: MINUS_EXPR),
(FRACT_MODE_P (TYPE_MODE (argtype))
? inc
: integer_one_node),
(code == POSTINCREMENT_EXPR
|| code == POSTDECREMENT_EXPR));
goto return_build_unary_op;
}
if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE)
val = boolean_increment (code, arg);
else
val = build2 (code, TREE_TYPE (arg), arg, inc);
TREE_SIDE_EFFECTS (val) = 1;
if (TREE_CODE (val) != code)
TREE_NO_WARNING (val) = 1;
ret = val;
goto return_build_unary_op;
}
case ADDR_EXPR:
/* Note that this operation never does default_conversion. */
/* The operand of unary '&' must be an lvalue (which excludes
expressions of type void), or, in C99, the result of a [] or
unary '*' operator. */
if (VOID_TYPE_P (TREE_TYPE (arg))
&& TYPE_QUALS (TREE_TYPE (arg)) == TYPE_UNQUALIFIED
&& (!INDIRECT_REF_P (arg) || !flag_isoc99))
pedwarn (location, 0, "taking address of expression of type %<void%>");
/* Let &* cancel out to simplify resulting code. */
if (INDIRECT_REF_P (arg))
{
/* Don't let this be an lvalue. */
if (lvalue_p (TREE_OPERAND (arg, 0)))
return non_lvalue_loc (location, TREE_OPERAND (arg, 0));
ret = TREE_OPERAND (arg, 0);
goto return_build_unary_op;
}
/* Anything not already handled and not a true memory reference
or a non-lvalue array is an error. */
if (typecode != FUNCTION_TYPE && !noconvert
&& !lvalue_or_else (location, arg, lv_addressof))
return error_mark_node;
/* Move address operations inside C_MAYBE_CONST_EXPR to simplify
folding later. */
if (TREE_CODE (arg) == C_MAYBE_CONST_EXPR)
{
tree inner = build_unary_op (location, code,
C_MAYBE_CONST_EXPR_EXPR (arg),
noconvert);
ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (inner),
C_MAYBE_CONST_EXPR_PRE (arg), inner);
gcc_assert (!C_MAYBE_CONST_EXPR_INT_OPERANDS (arg));
C_MAYBE_CONST_EXPR_NON_CONST (ret)
= C_MAYBE_CONST_EXPR_NON_CONST (arg);
goto return_build_unary_op;
}
/* Ordinary case; arg is a COMPONENT_REF or a decl. */
argtype = TREE_TYPE (arg);
/* If the lvalue is const or volatile, merge that into the type
to which the address will point. This is only needed
for function types. */
if ((DECL_P (arg) || REFERENCE_CLASS_P (arg))
&& (TREE_READONLY (arg) || TREE_THIS_VOLATILE (arg))
&& TREE_CODE (argtype) == FUNCTION_TYPE)
{
int orig_quals = TYPE_QUALS (strip_array_types (argtype));
int quals = orig_quals;
if (TREE_READONLY (arg))
quals |= TYPE_QUAL_CONST;
if (TREE_THIS_VOLATILE (arg))
quals |= TYPE_QUAL_VOLATILE;
argtype = c_build_qualified_type (argtype, quals);
}
switch (TREE_CODE (arg))
{
case COMPONENT_REF:
if (DECL_C_BIT_FIELD (TREE_OPERAND (arg, 1)))
{
error_at (location, "cannot take address of bit-field %qD",
TREE_OPERAND (arg, 1));
return error_mark_node;
}
/* fall through */
case ARRAY_REF:
if (TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (arg, 0))))
{
if (!AGGREGATE_TYPE_P (TREE_TYPE (arg))
&& !VECTOR_TYPE_P (TREE_TYPE (arg)))
{
error_at (location, "cannot take address of scalar with "
"reverse storage order");
return error_mark_node;
}
if (TREE_CODE (TREE_TYPE (arg)) == ARRAY_TYPE
&& TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (arg)))
warning_at (location, OPT_Wscalar_storage_order,
"address of array with reverse scalar storage "
"order requested");
}
default:
break;
}
if (!c_mark_addressable (arg))
return error_mark_node;
gcc_assert (TREE_CODE (arg) != COMPONENT_REF
|| !DECL_C_BIT_FIELD (TREE_OPERAND (arg, 1)));
argtype = build_pointer_type (argtype);
/* ??? Cope with user tricks that amount to offsetof. Delete this
when we have proper support for integer constant expressions. */
val = get_base_address (arg);
if (val && INDIRECT_REF_P (val)
&& TREE_CONSTANT (TREE_OPERAND (val, 0)))
{
ret = fold_offsetof (arg, argtype);
goto return_build_unary_op;
}
val = build1 (ADDR_EXPR, argtype, arg);
ret = val;
goto return_build_unary_op;
default:
gcc_unreachable ();
}
if (argtype == NULL_TREE)
argtype = TREE_TYPE (arg);
if (TREE_CODE (arg) == INTEGER_CST)
ret = (require_constant_value
? fold_build1_initializer_loc (location, code, argtype, arg)
: fold_build1_loc (location, code, argtype, arg));
else
ret = build1 (code, argtype, arg);
return_build_unary_op:
gcc_assert (ret != error_mark_node);
if (TREE_CODE (ret) == INTEGER_CST && !TREE_OVERFLOW (ret)
&& !(TREE_CODE (xarg) == INTEGER_CST && !TREE_OVERFLOW (xarg)))
ret = build1 (NOP_EXPR, TREE_TYPE (ret), ret);
else if (TREE_CODE (ret) != INTEGER_CST && int_operands)
ret = note_integer_operands (ret);
if (eptype)
ret = build1 (EXCESS_PRECISION_EXPR, eptype, ret);
protected_set_expr_location (ret, location);
return ret;
}
/* Return true if REF denotes an lvalue in C terms.  An lvalue may be
   assigned to unless its type is TYPE_READONLY, and may have its
   address taken unless it was declared with C_DECL_REGISTER.  */

bool
lvalue_p (const_tree ref)
{
  const enum tree_code refcode = TREE_CODE (ref);

  /* A part of a complex value or a member access is an lvalue exactly
     when the containing object is.  */
  if (refcode == REALPART_EXPR
      || refcode == IMAGPART_EXPR
      || refcode == COMPONENT_REF)
    return lvalue_p (TREE_OPERAND (ref, 0));

  /* Look through the wrapper at the underlying expression.  */
  if (refcode == C_MAYBE_CONST_EXPR)
    return lvalue_p (TREE_OPERAND (ref, 1));

  /* Compound literals and string literals designate objects.  */
  if (refcode == COMPOUND_LITERAL_EXPR || refcode == STRING_CST)
    return true;

  /* Dereferences, array element accesses and object declarations are
     lvalues provided they do not have function or method type.  */
  if (refcode == INDIRECT_REF
      || refcode == ARRAY_REF
      || refcode == VAR_DECL
      || refcode == PARM_DECL
      || refcode == RESULT_DECL
      || refcode == ERROR_MARK)
    {
      const enum tree_code typecode = TREE_CODE (TREE_TYPE (ref));
      return typecode != FUNCTION_TYPE && typecode != METHOD_TYPE;
    }

  /* A statement expression yielding an array is treated as an lvalue.  */
  if (refcode == BIND_EXPR)
    return TREE_CODE (TREE_TYPE (ref)) == ARRAY_TYPE;

  return false;
}
/* Warn about a store into ARG, which is read-only in GCC terms but not
   const-qualified in ISO C terms.  USE says whether ARG is being
   assigned, incremented or decremented.  */

static void
readonly_warning (tree arg, enum lvalue_use use)
{
  if (use == lv_assign)
    warning (0, "assignment of read-only location %qE", arg);
  else if (use == lv_increment)
    warning (0, "increment of read-only location %qE", arg);
  else if (use == lv_decrement)
    warning (0, "decrement of read-only location %qE", arg);
  else
    gcc_unreachable ();
}
/* Return nonzero if REF is an lvalue valid for this language;
   otherwise, print an error message and return zero.  USE says
   how the lvalue is being used and so selects the error message.
   LOCATION is the location at which any error should be reported.  */

static int
lvalue_or_else (location_t loc, const_tree ref, enum lvalue_use use)
{
  if (lvalue_p (ref))
    return 1;

  lvalue_error (loc, use);
  return 0;
}
/* Mark EXP saying that we need to be able to take the
   address of it; it should not be allocated in a register.
   Returns true if successful.  ARRAY_REF_P is true if this
   is for ARRAY_REF construction - in that case we don't want
   to look through VIEW_CONVERT_EXPR from VECTOR_TYPE to ARRAY_TYPE,
   it is fine to use ARRAY_REFs for vector subscripts on vector
   register variables.  */

bool
c_mark_addressable (tree exp, bool array_ref_p)
{
  /* Walk from EXP down to the object whose storage must actually be
     addressable, stripping references along the way.  */
  tree x = exp;
  while (1)
    switch (TREE_CODE (x))
      {
      case VIEW_CONVERT_EXPR:
	/* For ARRAY_REF construction, a view of a vector as an array
	   does not need to force the vector into memory; vector
	   subscripting of register variables is allowed.  */
	if (array_ref_p
	    && TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
	    && VECTOR_TYPE_P (TREE_TYPE (TREE_OPERAND (x, 0))))
	  return true;
	/* FALLTHRU */
      case COMPONENT_REF:
      case ADDR_EXPR:
      case ARRAY_REF:
      case REALPART_EXPR:
      case IMAGPART_EXPR:
	/* Descend into the object being referenced.  */
	x = TREE_OPERAND (x, 0);
	break;

      case COMPOUND_LITERAL_EXPR:
	/* Mark both the literal expression and its anonymous decl.  */
	TREE_ADDRESSABLE (x) = 1;
	TREE_ADDRESSABLE (COMPOUND_LITERAL_EXPR_DECL (x)) = 1;
	return true;

      case CONSTRUCTOR:
	TREE_ADDRESSABLE (x) = 1;
	return true;

      case VAR_DECL:
      case CONST_DECL:
      case PARM_DECL:
      case RESULT_DECL:
	/* Taking the address of a register variable is diagnosed.  A
	   register variable merely *used* from a nested function gets
	   a pedwarn (global register variables get a hard error);
	   taking its address directly is always an error.  */
	if (C_DECL_REGISTER (x)
	    && DECL_NONLOCAL (x))
	  {
	    if (TREE_PUBLIC (x) || is_global_var (x))
	      {
		error
		  ("global register variable %qD used in nested function", x);
		return false;
	      }
	    pedwarn (input_location, 0, "register variable %qD used in nested function", x);
	  }
	else if (C_DECL_REGISTER (x))
	  {
	    if (TREE_PUBLIC (x) || is_global_var (x))
	      error ("address of global register variable %qD requested", x);
	    else
	      error ("address of register variable %qD requested", x);
	    return false;
	  }

	/* FALLTHRU */
      case FUNCTION_DECL:
	TREE_ADDRESSABLE (x) = 1;
	/* FALLTHRU */
      default:
	return true;
      }
}
/* Convert EXPR to TYPE, warning about conversion problems with
   constants.  SEMANTIC_TYPE is the type this conversion would use
   without excess precision.  If SEMANTIC_TYPE is NULL, this function
   is equivalent to convert_and_check.  This function is a wrapper that
   handles conversions that may be different than
   the usual ones because of excess precision.  */

static tree
ep_convert_and_check (location_t loc, tree type, tree expr,
		      tree semantic_type)
{
  /* Nothing to do if EXPR already has the requested type.  */
  if (TREE_TYPE (expr) == type)
    return expr;

  /* With no excess precision in play this is an ordinary checked
     conversion.  Note: this must not also test flag_isoc11 -- callers
     such as build_conditional_expr compute SEMANTIC_TYPE precisely in
     C11 mode, and short-circuiting here would skip the check of the
     real (semantic) conversion below.  */
  if (!semantic_type)
    return convert_and_check (loc, type, expr);

  if (TREE_CODE (TREE_TYPE (expr)) == INTEGER_TYPE
      && TREE_TYPE (expr) != semantic_type)
    {
      /* For integers, we need to check the real conversion, not
	 the conversion to the excess precision type.  */
      expr = convert_and_check (loc, semantic_type, expr);
    }
  /* Result type is the excess precision type, which should be
     large enough, so do not check.  */
  return convert (type, expr);
}
/* If EXPR refers to a built-in declared without a prototype returns
   the actual type of the built-in and, if non-null, set *BLTIN to
   a pointer to the built-in.  Otherwise return the type of EXPR
   and clear *BLTIN if non-null.  */

static tree
type_or_builtin_type (tree expr, tree *bltin = NULL)
{
  /* Redirect a null BLTIN to a local so the code below can store
     through it unconditionally.  */
  tree scratch;
  if (!bltin)
    bltin = &scratch;
  *bltin = NULL_TREE;

  tree type = TREE_TYPE (expr);

  /* Only the address of a normal built-in function is interesting.  */
  if (TREE_CODE (expr) != ADDR_EXPR)
    return type;

  tree fn = TREE_OPERAND (expr, 0);
  if (!DECL_P (fn)
      || TREE_CODE (fn) != FUNCTION_DECL
      || !fndecl_built_in_p (fn, BUILT_IN_NORMAL))
    return type;

  built_in_function fncode = DECL_FUNCTION_CODE (fn);
  if (!C_DECL_BUILTIN_PROTOTYPE (fn))
    return type;

  /* Use the implicit declaration's type when one is available.  */
  tree decl = builtin_decl_implicit (fncode);
  *bltin = decl;
  if (decl)
    type = build_pointer_type (TREE_TYPE (decl));

  return type;
}
/* Build and return a conditional expression IFEXP ? OP1 : OP2.  If
   IFEXP_BCP then the condition is a call to __builtin_constant_p, and
   if folded to an integer constant then the unselected half may
   contain arbitrary operations not normally permitted in constant
   expressions.  Set the location of the expression to COLON_LOC.
   OP1_ORIGINAL_TYPE and OP2_ORIGINAL_TYPE are the operand types before
   conversion (or NULL_TREE), used only for diagnostics; OP1_LOC and
   OP2_LOC locate the operands for those diagnostics.  */

tree
build_conditional_expr (location_t colon_loc, tree ifexp, bool ifexp_bcp,
			tree op1, tree op1_original_type, location_t op1_loc,
			tree op2, tree op2_original_type, location_t op2_loc)
{
  tree type1;
  tree type2;
  enum tree_code code1;
  enum tree_code code2;
  tree result_type = NULL;
  tree semantic_result_type = NULL;
  tree orig_op1 = op1, orig_op2 = op2;
  bool int_const, op1_int_operands, op2_int_operands, int_operands;
  bool ifexp_int_operands;
  tree ret;

  /* Note which operands consist entirely of integer constant operands
     and strip their C_MAYBE_CONST_EXPR wrappers.  */
  op1_int_operands = EXPR_INT_CONST_OPERANDS (orig_op1);
  if (op1_int_operands)
    op1 = remove_c_maybe_const_expr (op1);
  op2_int_operands = EXPR_INT_CONST_OPERANDS (orig_op2);
  if (op2_int_operands)
    op2 = remove_c_maybe_const_expr (op2);
  ifexp_int_operands = EXPR_INT_CONST_OPERANDS (ifexp);
  if (ifexp_int_operands)
    ifexp = remove_c_maybe_const_expr (ifexp);

  /* Promote both alternatives.  */
  if (TREE_CODE (TREE_TYPE (op1)) != VOID_TYPE)
    op1 = default_conversion (op1);
  if (TREE_CODE (TREE_TYPE (op2)) != VOID_TYPE)
    op2 = default_conversion (op2);

  if (TREE_CODE (ifexp) == ERROR_MARK
      || TREE_CODE (TREE_TYPE (op1)) == ERROR_MARK
      || TREE_CODE (TREE_TYPE (op2)) == ERROR_MARK)
    return error_mark_node;

  /* For an operand that is the address of an unprototyped built-in,
     TYPE1/TYPE2 become the built-in's actual type and BLTIN1/BLTIN2
     its declaration (used in the mismatch diagnostic below).  */
  tree bltin1 = NULL_TREE;
  tree bltin2 = NULL_TREE;
  type1 = type_or_builtin_type (op1, &bltin1);
  code1 = TREE_CODE (type1);
  type2 = type_or_builtin_type (op2, &bltin2);
  code2 = TREE_CODE (type2);

  if (code1 == POINTER_TYPE && reject_gcc_builtin (op1))
    return error_mark_node;

  if (code2 == POINTER_TYPE && reject_gcc_builtin (op2))
    return error_mark_node;

  /* C90 does not permit non-lvalue arrays in conditional expressions.
     In C99 they will be pointers by now.  */
  if (code1 == ARRAY_TYPE || code2 == ARRAY_TYPE)
    {
      error_at (colon_loc, "non-lvalue array in conditional expression");
      return error_mark_node;
    }

  /* If either arm carries excess precision, compute the semantic
     result type now and strip the wrappers; the final result is
     re-wrapped in EXCESS_PRECISION_EXPR at the end.  */
  if ((TREE_CODE (op1) == EXCESS_PRECISION_EXPR
       || TREE_CODE (op2) == EXCESS_PRECISION_EXPR)
      && (code1 == INTEGER_TYPE || code1 == REAL_TYPE
	  || code1 == COMPLEX_TYPE)
      && (code2 == INTEGER_TYPE || code2 == REAL_TYPE
	  || code2 == COMPLEX_TYPE))
    {
      semantic_result_type = c_common_type (type1, type2);
      if (TREE_CODE (op1) == EXCESS_PRECISION_EXPR)
	{
	  op1 = TREE_OPERAND (op1, 0);
	  type1 = TREE_TYPE (op1);
	  gcc_assert (TREE_CODE (type1) == code1);
	}
      if (TREE_CODE (op2) == EXCESS_PRECISION_EXPR)
	{
	  op2 = TREE_OPERAND (op2, 0);
	  type2 = TREE_TYPE (op2);
	  gcc_assert (TREE_CODE (type2) == code2);
	}
    }

  if (warn_cxx_compat)
    {
      tree t1 = op1_original_type ? op1_original_type : TREE_TYPE (orig_op1);
      tree t2 = op2_original_type ? op2_original_type : TREE_TYPE (orig_op2);

      if (TREE_CODE (t1) == ENUMERAL_TYPE
	  && TREE_CODE (t2) == ENUMERAL_TYPE
	  && TYPE_MAIN_VARIANT (t1) != TYPE_MAIN_VARIANT (t2))
	warning_at (colon_loc, OPT_Wc___compat,
		    ("different enum types in conditional is "
		     "invalid in C++: %qT vs %qT"),
		    t1, t2);
    }

  /* Quickly detect the usual case where op1 and op2 have the same type
     after promotion.  */
  if (TYPE_MAIN_VARIANT (type1) == TYPE_MAIN_VARIANT (type2))
    {
      if (type1 == type2)
	result_type = type1;
      else
	result_type = TYPE_MAIN_VARIANT (type1);
    }
  else if ((code1 == INTEGER_TYPE || code1 == REAL_TYPE
	    || code1 == COMPLEX_TYPE)
	   && (code2 == INTEGER_TYPE || code2 == REAL_TYPE
	       || code2 == COMPLEX_TYPE))
    {
      /* In C11, a conditional expression between a floating-point
	 type and an integer type should convert the integer type to
	 the evaluation format of the floating-point type, with
	 possible excess precision.  */
      tree eptype1 = type1;
      tree eptype2 = type2;
      if (flag_isoc11)
	{
	  tree eptype;
	  if (ANY_INTEGRAL_TYPE_P (type1)
	      && (eptype = excess_precision_type (type2)) != NULL_TREE)
	    {
	      eptype2 = eptype;
	      if (!semantic_result_type)
		semantic_result_type = c_common_type (type1, type2);
	    }
	  else if (ANY_INTEGRAL_TYPE_P (type2)
		   && (eptype = excess_precision_type (type1)) != NULL_TREE)
	    {
	      eptype1 = eptype;
	      if (!semantic_result_type)
		semantic_result_type = c_common_type (type1, type2);
	    }
	}
      result_type = c_common_type (eptype1, eptype2);
      if (result_type == error_mark_node)
	return error_mark_node;
      do_warn_double_promotion (result_type, type1, type2,
				"implicit conversion from %qT to %qT to "
				"match other result of conditional",
				colon_loc);

      /* If -Wsign-compare, warn here if type1 and type2 have
	 different signedness.  We'll promote the signed to unsigned
	 and later code won't know it used to be different.
	 Do this check on the original types, so that explicit casts
	 will be considered, but default promotions won't.  */
      if (c_inhibit_evaluation_warnings == 0)
	{
	  int unsigned_op1 = TYPE_UNSIGNED (TREE_TYPE (orig_op1));
	  int unsigned_op2 = TYPE_UNSIGNED (TREE_TYPE (orig_op2));

	  if (unsigned_op1 ^ unsigned_op2)
	    {
	      bool ovf;

	      /* Do not warn if the result type is signed, since the
		 signed type will only be chosen if it can represent
		 all the values of the unsigned type.  */
	      if (!TYPE_UNSIGNED (result_type))
		/* OK */;
	      else
		{
		  bool op1_maybe_const = true;
		  bool op2_maybe_const = true;

		  /* Do not warn if the signed quantity is an
		     unsuffixed integer literal (or some static
		     constant expression involving such literals) and
		     it is non-negative.  This warning requires the
		     operands to be folded for best results, so do
		     that folding in this case even without
		     warn_sign_compare to avoid warning options
		     possibly affecting code generation.  */
		  c_inhibit_evaluation_warnings
		    += (ifexp == truthvalue_false_node);
		  op1 = c_fully_fold (op1, require_constant_value,
				      &op1_maybe_const);
		  c_inhibit_evaluation_warnings
		    -= (ifexp == truthvalue_false_node);

		  c_inhibit_evaluation_warnings
		    += (ifexp == truthvalue_true_node);
		  op2 = c_fully_fold (op2, require_constant_value,
				      &op2_maybe_const);
		  c_inhibit_evaluation_warnings
		    -= (ifexp == truthvalue_true_node);

		  if (warn_sign_compare)
		    {
		      if ((unsigned_op2
			   && tree_expr_nonnegative_warnv_p (op1, &ovf))
			  || (unsigned_op1
			      && tree_expr_nonnegative_warnv_p (op2, &ovf)))
			/* OK */;
		      else if (unsigned_op2)
			warning_at (op1_loc, OPT_Wsign_compare,
				    "operand of %<?:%> changes signedness from "
				    "%qT to %qT due to unsignedness of other "
				    "operand", TREE_TYPE (orig_op1),
				    TREE_TYPE (orig_op2));
		      else
			warning_at (op2_loc, OPT_Wsign_compare,
				    "operand of %<?:%> changes signedness from "
				    "%qT to %qT due to unsignedness of other "
				    "operand", TREE_TYPE (orig_op2),
				    TREE_TYPE (orig_op1));
		    }
		  /* Re-wrap operands that are no longer plain integer
		     constants so later folding treats them correctly.  */
		  if (!op1_maybe_const || TREE_CODE (op1) != INTEGER_CST)
		    op1 = c_wrap_maybe_const (op1, !op1_maybe_const);
		  if (!op2_maybe_const || TREE_CODE (op2) != INTEGER_CST)
		    op2 = c_wrap_maybe_const (op2, !op2_maybe_const);
		}
	    }
	}
    }
  else if (code1 == VOID_TYPE || code2 == VOID_TYPE)
    {
      if (code1 != VOID_TYPE || code2 != VOID_TYPE)
	pedwarn (colon_loc, OPT_Wpedantic,
		 "ISO C forbids conditional expr with only one void side");
      result_type = void_type_node;
    }
  else if (code1 == POINTER_TYPE && code2 == POINTER_TYPE)
    {
      addr_space_t as1 = TYPE_ADDR_SPACE (TREE_TYPE (type1));
      addr_space_t as2 = TYPE_ADDR_SPACE (TREE_TYPE (type2));
      addr_space_t as_common;

      if (comp_target_types (colon_loc, type1, type2))
	result_type = common_pointer_type (type1, type2);
      else if (null_pointer_constant_p (orig_op1))
	result_type = type2;
      else if (null_pointer_constant_p (orig_op2))
	result_type = type1;
      else if (!addr_space_superset (as1, as2, &as_common))
	{
	  error_at (colon_loc, "pointers to disjoint address spaces "
		    "used in conditional expression");
	  return error_mark_node;
	}
      else if (VOID_TYPE_P (TREE_TYPE (type1))
	       && !TYPE_ATOMIC (TREE_TYPE (type1)))
	{
	  if ((TREE_CODE (TREE_TYPE (type2)) == ARRAY_TYPE)
	      && (TYPE_QUALS (strip_array_types (TREE_TYPE (type2)))
		  & ~TYPE_QUALS (TREE_TYPE (type1))))
	    warning_at (colon_loc, OPT_Wdiscarded_array_qualifiers,
			"pointer to array loses qualifier "
			"in conditional expression");

	  if (TREE_CODE (TREE_TYPE (type2)) == FUNCTION_TYPE)
	    pedwarn (colon_loc, OPT_Wpedantic,
		     "ISO C forbids conditional expr between "
		     "%<void *%> and function pointer");
	  result_type = build_pointer_type (qualify_type (TREE_TYPE (type1),
							  TREE_TYPE (type2)));
	}
      else if (VOID_TYPE_P (TREE_TYPE (type2))
	       && !TYPE_ATOMIC (TREE_TYPE (type2)))
	{
	  if ((TREE_CODE (TREE_TYPE (type1)) == ARRAY_TYPE)
	      && (TYPE_QUALS (strip_array_types (TREE_TYPE (type1)))
		  & ~TYPE_QUALS (TREE_TYPE (type2))))
	    warning_at (colon_loc, OPT_Wdiscarded_array_qualifiers,
			"pointer to array loses qualifier "
			"in conditional expression");

	  if (TREE_CODE (TREE_TYPE (type1)) == FUNCTION_TYPE)
	    pedwarn (colon_loc, OPT_Wpedantic,
		     "ISO C forbids conditional expr between "
		     "%<void *%> and function pointer");
	  result_type = build_pointer_type (qualify_type (TREE_TYPE (type2),
							  TREE_TYPE (type1)));
	}
      /* Objective-C pointer comparisons are a bit more lenient.  */
      else if (objc_have_common_type (type1, type2, -3, NULL_TREE))
	result_type = objc_common_type (type1, type2);
      else
	{
	  int qual = ENCODE_QUAL_ADDR_SPACE (as_common);

	  if (bltin1 && bltin2)
	    warning_at (colon_loc, OPT_Wincompatible_pointer_types,
			"pointer type mismatch between %qT and %qT "
			"of %qD and %qD in conditional expression",
			type1, type2, bltin1, bltin2);
	  else
	    pedwarn (colon_loc, 0,
		     "pointer type mismatch in conditional expression");
	  result_type = build_pointer_type
			  (build_qualified_type (void_type_node, qual));
	}
    }
  else if (code1 == POINTER_TYPE && code2 == INTEGER_TYPE)
    {
      if (!null_pointer_constant_p (orig_op2))
	pedwarn (colon_loc, 0,
		 "pointer/integer type mismatch in conditional expression");
      else
	{
	  op2 = null_pointer_node;
	}
      result_type = type1;
    }
  else if (code2 == POINTER_TYPE && code1 == INTEGER_TYPE)
    {
      if (!null_pointer_constant_p (orig_op1))
	pedwarn (colon_loc, 0,
		 "pointer/integer type mismatch in conditional expression");
      else
	{
	  op1 = null_pointer_node;
	}
      result_type = type2;
    }

  if (!result_type)
    {
      if (flag_cond_mismatch)
	result_type = void_type_node;
      else
	{
	  error_at (colon_loc, "type mismatch in conditional expression");
	  return error_mark_node;
	}
    }

  /* Merge const and volatile flags of the incoming types.  */
  result_type
    = build_type_variant (result_type,
			  TYPE_READONLY (type1) || TYPE_READONLY (type2),
			  TYPE_VOLATILE (type1) || TYPE_VOLATILE (type2));

  op1 = ep_convert_and_check (colon_loc, result_type, op1,
			      semantic_result_type);
  op2 = ep_convert_and_check (colon_loc, result_type, op2,
			      semantic_result_type);

  /* For a __builtin_constant_p condition folded to a constant, fully
     fold the selected arm and treat the unselected arm as if it were
     made of integer constant operands (it is never evaluated).  */
  if (ifexp_bcp && ifexp == truthvalue_true_node)
    {
      op2_int_operands = true;
      op1 = c_fully_fold (op1, require_constant_value, NULL);
    }
  if (ifexp_bcp && ifexp == truthvalue_false_node)
    {
      op1_int_operands = true;
      op2 = c_fully_fold (op2, require_constant_value, NULL);
    }
  int_const = int_operands = (ifexp_int_operands
			      && op1_int_operands
			      && op2_int_operands);
  if (int_operands)
    {
      /* The whole expression folds to a constant only when the
	 selected arm is an overflow-free INTEGER_CST.  */
      int_const = ((ifexp == truthvalue_true_node
		    && TREE_CODE (orig_op1) == INTEGER_CST
		    && !TREE_OVERFLOW (orig_op1))
		   || (ifexp == truthvalue_false_node
		       && TREE_CODE (orig_op2) == INTEGER_CST
		       && !TREE_OVERFLOW (orig_op2)));
    }

  /* Need to convert condition operand into a vector mask.  */
  if (VECTOR_TYPE_P (TREE_TYPE (ifexp)))
    {
      tree vectype = TREE_TYPE (ifexp);
      tree elem_type = TREE_TYPE (vectype);
      tree zero = build_int_cst (elem_type, 0);
      tree zero_vec = build_vector_from_val (vectype, zero);
      tree cmp_type = truth_type_for (vectype);
      ifexp = build2 (NE_EXPR, cmp_type, ifexp, zero_vec);
    }

  if (int_const || (ifexp_bcp && TREE_CODE (ifexp) == INTEGER_CST))
    ret = fold_build3_loc (colon_loc, COND_EXPR, result_type, ifexp, op1, op2);
  else
    {
      if (int_operands)
	{
	  /* Use c_fully_fold here, since C_MAYBE_CONST_EXPR might be
	     nested inside of the expression.  */
	  op1 = c_fully_fold (op1, false, NULL);
	  op2 = c_fully_fold (op2, false, NULL);
	}
      ret = build3 (COND_EXPR, result_type, ifexp, op1, op2);
      if (int_operands)
	ret = note_integer_operands (ret);
    }
  if (semantic_result_type)
    ret = build1 (EXCESS_PRECISION_EXPR, semantic_result_type, ret);

  protected_set_expr_location (ret, colon_loc);

  /* If the OP1 and OP2 are the same and don't have side-effects,
     warn here, because the COND_EXPR will be turned into OP1.  */
  if (warn_duplicated_branches
      && TREE_CODE (ret) == COND_EXPR
      && (op1 == op2 || operand_equal_p (op1, op2, 0)))
    warning_at (EXPR_LOCATION (ret), OPT_Wduplicated_branches,
		"this condition has identical branches");

  return ret;
}
/* Return a compound expression that performs two expressions and
   returns the value of the second of them.

   LOC is the location of the COMPOUND_EXPR.  */

tree
build_compound_expr (location_t loc, tree expr1, tree expr2)
{
  bool expr1_int_operands, expr2_int_operands;
  tree eptype = NULL_TREE;
  tree ret;

  /* Note which operands consist entirely of integer constant operands
     and strip their C_MAYBE_CONST_EXPR wrappers.  */
  expr1_int_operands = EXPR_INT_CONST_OPERANDS (expr1);
  if (expr1_int_operands)
    expr1 = remove_c_maybe_const_expr (expr1);
  expr2_int_operands = EXPR_INT_CONST_OPERANDS (expr2);
  if (expr2_int_operands)
    expr2 = remove_c_maybe_const_expr (expr2);

  /* EXPR1's value is discarded, so its excess precision is irrelevant;
     for EXPR2 remember the excess-precision type so the result can be
     re-wrapped below.  */
  if (TREE_CODE (expr1) == EXCESS_PRECISION_EXPR)
    expr1 = TREE_OPERAND (expr1, 0);
  if (TREE_CODE (expr2) == EXCESS_PRECISION_EXPR)
    {
      eptype = TREE_TYPE (expr2);
      expr2 = TREE_OPERAND (expr2, 0);
    }

  if (!TREE_SIDE_EFFECTS (expr1))
    {
      /* The left-hand operand of a comma expression is like an expression
	 statement: with -Wunused, we should warn if it doesn't have
	 any side-effects, unless it was explicitly cast to (void).  */
      if (warn_unused_value)
	{
	  if (VOID_TYPE_P (TREE_TYPE (expr1))
	      && CONVERT_EXPR_P (expr1))
	    ; /* (void) a, b */
	  else if (VOID_TYPE_P (TREE_TYPE (expr1))
		   && TREE_CODE (expr1) == COMPOUND_EXPR
		   && CONVERT_EXPR_P (TREE_OPERAND (expr1, 1)))
	    ; /* (void) a, (void) b, c */
	  else
	    warning_at (loc, OPT_Wunused_value,
			"left-hand operand of comma expression has no effect");
	}
    }
  else if (TREE_CODE (expr1) == COMPOUND_EXPR
	   && warn_unused_value)
    {
      /* EXPR1 is itself a comma chain with side effects somewhere;
	 warn if its rightmost operand computes an unused value, using
	 that operand's own location when it has one.  */
      tree r = expr1;
      location_t cloc = loc;
      while (TREE_CODE (r) == COMPOUND_EXPR)
	{
	  if (EXPR_HAS_LOCATION (r))
	    cloc = EXPR_LOCATION (r);
	  r = TREE_OPERAND (r, 1);
	}
      if (!TREE_SIDE_EFFECTS (r)
	  && !VOID_TYPE_P (TREE_TYPE (r))
	  && !CONVERT_EXPR_P (r))
	warning_at (cloc, OPT_Wunused_value,
		    "right-hand operand of comma expression has no effect");
    }

  /* With -Wunused, we should also warn if the left-hand operand does have
     side-effects, but computes a value which is not used.  For example, in
     `foo() + bar(), baz()' the result of the `+' operator is not used,
     so we should issue a warning.  */
  else if (warn_unused_value)
    warn_if_unused_value (expr1, loc);

  if (expr2 == error_mark_node)
    return error_mark_node;

  ret = build2 (COMPOUND_EXPR, TREE_TYPE (expr2), expr1, expr2);

  /* Track integer constant operands, in C99 and later only.  */
  if (flag_isoc99
      && expr1_int_operands
      && expr2_int_operands)
    ret = note_integer_operands (ret);

  if (eptype)
    ret = build1 (EXCESS_PRECISION_EXPR, eptype, ret);

  protected_set_expr_location (ret, loc);
  return ret;
}
/* Issue -Wcast-qual warnings when appropriate.  TYPE is the type to
   which we are casting.  OTYPE is the type of the expression being
   cast.  Both TYPE and OTYPE are pointer types.  LOC is the location
   of the cast.  -Wcast-qual appeared on the command line.  Named
   address space qualifiers are not handled here, because they result
   in different warnings.  */

static void
handle_warn_cast_qual (location_t loc, tree type, tree otype)
{
  tree in_type = type;
  tree in_otype = otype;
  int added = 0;       /* Qualifier bits the cast adds to a function type.  */
  int discarded = 0;   /* Qualifier bits the cast drops from a target type.  */
  bool is_const;

  /* Check that the qualifiers on IN_TYPE are a superset of the
     qualifiers of IN_OTYPE.  The outermost level of POINTER_TYPE
     nodes is uninteresting and we stop as soon as we hit a
     non-POINTER_TYPE node on either type.  */
  do
    {
      in_otype = TREE_TYPE (in_otype);
      in_type = TREE_TYPE (in_type);

      /* GNU C allows cv-qualified function types.  'const' means the
	 function is very pure, 'volatile' means it can't return.  We
	 need to warn when such qualifiers are added, not when they're
	 taken away.  */
      if (TREE_CODE (in_otype) == FUNCTION_TYPE
	  && TREE_CODE (in_type) == FUNCTION_TYPE)
	added |= (TYPE_QUALS_NO_ADDR_SPACE (in_type)
		  & ~TYPE_QUALS_NO_ADDR_SPACE (in_otype));
      else
	discarded |= (TYPE_QUALS_NO_ADDR_SPACE (in_otype)
		      & ~TYPE_QUALS_NO_ADDR_SPACE (in_type));
    }
  while (TREE_CODE (in_type) == POINTER_TYPE
	 && TREE_CODE (in_otype) == POINTER_TYPE);

  if (added)
    warning_at (loc, OPT_Wcast_qual,
		"cast adds %q#v qualifier to function type", added);

  if (discarded)
    /* There are qualifiers present in IN_OTYPE that are not present
       in IN_TYPE.  */
    warning_at (loc, OPT_Wcast_qual,
		"cast discards %qv qualifier from pointer target type",
		discarded);

  if (added || discarded)
    return;

  /* A cast from **T to const **T is unsafe, because it can cause a
     const value to be changed with no additional warning.  We only
     issue this warning if T is the same on both sides, and we only
     issue the warning if there are the same number of pointers on
     both sides, as otherwise the cast is clearly unsafe anyhow.  A
     cast is unsafe when a qualifier is added at one level and const
     is not present at all outer levels.

     To issue this warning, we check at each level whether the cast
     adds new qualifiers not already seen.  We don't need to special
     case function types, as they won't have the same
     TYPE_MAIN_VARIANT.  */

  if (TYPE_MAIN_VARIANT (in_type) != TYPE_MAIN_VARIANT (in_otype))
    return;
  if (TREE_CODE (TREE_TYPE (type)) != POINTER_TYPE)
    return;

  in_type = type;
  in_otype = otype;
  is_const = TYPE_READONLY (TREE_TYPE (in_type));
  do
    {
      in_type = TREE_TYPE (in_type);
      in_otype = TREE_TYPE (in_otype);
      if ((TYPE_QUALS (in_type) &~ TYPE_QUALS (in_otype)) != 0
	  && !is_const)
	{
	  warning_at (loc, OPT_Wcast_qual,
		      "to be safe all intermediate pointers in cast from "
		      "%qT to %qT must be %<const%> qualified",
		      otype, type);
	  break;
	}
      /* Once a level without const is seen, stop treating outer
	 levels as const-protected.  */
      if (is_const)
	is_const = TYPE_READONLY (in_type);
    }
  while (TREE_CODE (in_type) == POINTER_TYPE);
}
/* Heuristic check if two parameter types can be considered ABI-equivalent.  */

static bool
c_safe_arg_type_equiv_p (tree t1, tree t2)
{
  tree m1 = TYPE_MAIN_VARIANT (t1);
  tree m2 = TYPE_MAIN_VARIANT (t2);

  /* Any pair of pointers is interchangeable at the ABI level.  */
  if (TREE_CODE (m1) == POINTER_TYPE && TREE_CODE (m2) == POINTER_TYPE)
    return true;

  /* The signedness of the parameter matters only when an integral
     type smaller than int is promoted to int, otherwise only the
     precision of the parameter matters.
     This check should make sure that the callee does not see
     undefined values in argument registers.  */
  if (INTEGRAL_TYPE_P (m1)
      && INTEGRAL_TYPE_P (m2)
      && TYPE_PRECISION (m1) == TYPE_PRECISION (m2))
    {
      if (TYPE_UNSIGNED (m1) == TYPE_UNSIGNED (m2))
	return true;
      if (!targetm.calls.promote_prototypes (NULL_TREE))
	return true;
      if (TYPE_PRECISION (m1) >= TYPE_PRECISION (integer_type_node))
	return true;
    }

  return comptypes (m1, m2);
}
/* Check if a type cast between two function types can be considered safe.  */

static bool
c_safe_function_type_cast_p (tree t1, tree t2)
{
  /* Casting to or from "void (*) (void)" is always treated as safe.  */
  if (TREE_TYPE (t1) == void_type_node
      && TYPE_ARG_TYPES (t1) == void_list_node)
    return true;
  if (TREE_TYPE (t2) == void_type_node
      && TYPE_ARG_TYPES (t2) == void_list_node)
    return true;

  /* The return types must be ABI-equivalent.  */
  if (!c_safe_arg_type_equiv_p (TREE_TYPE (t1), TREE_TYPE (t2)))
    return false;

  /* Walk both parameter lists in lock-step, comparing each pair.  */
  tree a1 = TYPE_ARG_TYPES (t1);
  tree a2 = TYPE_ARG_TYPES (t2);
  while (a1 && a2)
    {
      if (!c_safe_arg_type_equiv_p (TREE_VALUE (a1), TREE_VALUE (a2)))
	return false;
      a1 = TREE_CHAIN (a1);
      a2 = TREE_CHAIN (a2);
    }

  return true;
}
/* Build an expression representing a cast to type TYPE of expression EXPR.
   LOC is the location of the cast-- typically the open paren of the cast.
   Returns the cast tree, or error_mark_node after emitting a diagnostic
   for invalid casts (to array/function types, to a union type without a
   matching member, or of an incomplete-typed operand).  */

tree
build_c_cast (location_t loc, tree type, tree expr)
{
  tree value;

  /* Remember whether EXPR could appear in an integer constant
     expression, before any stripping below loses that information.  */
  bool int_operands = EXPR_INT_CONST_OPERANDS (expr);

  /* Strip any excess-precision wrapper; the cast supplies the semantic
     type explicitly.  */
  if (TREE_CODE (expr) == EXCESS_PRECISION_EXPR)
    expr = TREE_OPERAND (expr, 0);

  value = expr;
  if (int_operands)
    value = remove_c_maybe_const_expr (value);

  if (type == error_mark_node || expr == error_mark_node)
    return error_mark_node;

  /* The ObjC front-end uses TYPE_MAIN_VARIANT to tie together types differing
     only in <protocol> qualifications.  But when constructing cast expressions,
     the protocols do matter and must be kept around.  */
  if (objc_is_object_ptr (type) && objc_is_object_ptr (TREE_TYPE (expr)))
    return build1 (NOP_EXPR, type, expr);

  type = TYPE_MAIN_VARIANT (type);

  /* Casts to array and function types are constraint violations.  */
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      error_at (loc, "cast specifies array type");
      return error_mark_node;
    }

  if (TREE_CODE (type) == FUNCTION_TYPE)
    {
      error_at (loc, "cast specifies function type");
      return error_mark_node;
    }

  /* Except for a cast to void, the operand must have complete type.  */
  if (!VOID_TYPE_P (type))
    {
      value = require_complete_type (loc, value);
      if (value == error_mark_node)
	return error_mark_node;
    }

  if (type == TYPE_MAIN_VARIANT (TREE_TYPE (value)))
    {
      /* Cast to the operand's own type: only qualifier stripping.  */
      if (RECORD_OR_UNION_TYPE_P (type))
	pedwarn (loc, OPT_Wpedantic,
		 "ISO C forbids casting nonscalar to the same type");

      /* Convert to remove any qualifiers from VALUE's type.  */
      value = convert (type, value);
    }
  else if (TREE_CODE (type) == UNION_TYPE)
    {
      /* GNU extension: a cast to union type is valid when the operand's
	 type matches one of the union's members; the result is a
	 constructor initializing that member.  */
      tree field;

      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	if (TREE_TYPE (field) != error_mark_node
	    && comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (field)),
			  TYPE_MAIN_VARIANT (TREE_TYPE (value))))
	  break;

      if (field)
	{
	  tree t;
	  bool maybe_const = true;

	  pedwarn (loc, OPT_Wpedantic, "ISO C forbids casts to union type");
	  t = c_fully_fold (value, false, &maybe_const);
	  t = build_constructor_single (type, field, t);
	  if (!maybe_const)
	    t = c_wrap_maybe_const (t, true);
	  t = digest_init (loc, type, t,
			   NULL_TREE, false, true, 0);
	  TREE_CONSTANT (t) = TREE_CONSTANT (value);
	  return t;
	}
      error_at (loc, "cast to union type from type not present in union");
      return error_mark_node;
    }
  else
    {
      tree otype, ovalue;

      if (type == void_type_node)
	{
	  tree t = build1 (CONVERT_EXPR, type, value);
	  SET_EXPR_LOCATION (t, loc);
	  return t;
	}

      otype = TREE_TYPE (value);

      /* Optionally warn about potentially worrisome casts.  */
      if (warn_cast_qual
	  && TREE_CODE (type) == POINTER_TYPE
	  && TREE_CODE (otype) == POINTER_TYPE)
	handle_warn_cast_qual (loc, type, otype);

      /* Warn about conversions between pointers to disjoint
	 address spaces.  */
      if (TREE_CODE (type) == POINTER_TYPE
	  && TREE_CODE (otype) == POINTER_TYPE
	  && !null_pointer_constant_p (value))
	{
	  addr_space_t as_to = TYPE_ADDR_SPACE (TREE_TYPE (type));
	  addr_space_t as_from = TYPE_ADDR_SPACE (TREE_TYPE (otype));
	  addr_space_t as_common;

	  if (!addr_space_superset (as_to, as_from, &as_common))
	    {
	      if (ADDR_SPACE_GENERIC_P (as_from))
		warning_at (loc, 0, "cast to %s address space pointer "
			    "from disjoint generic address space pointer",
			    c_addr_space_name (as_to));
	      else if (ADDR_SPACE_GENERIC_P (as_to))
		warning_at (loc, 0, "cast to generic address space pointer "
			    "from disjoint %s address space pointer",
			    c_addr_space_name (as_from));
	      else
		warning_at (loc, 0, "cast to %s address space pointer "
			    "from disjoint %s address space pointer",
			    c_addr_space_name (as_to),
			    c_addr_space_name (as_from));
	    }
	}

      /* Warn about possible alignment problems.  */
      if ((STRICT_ALIGNMENT || warn_cast_align == 2)
	  && TREE_CODE (type) == POINTER_TYPE
	  && TREE_CODE (otype) == POINTER_TYPE
	  && TREE_CODE (TREE_TYPE (otype)) != VOID_TYPE
	  && TREE_CODE (TREE_TYPE (otype)) != FUNCTION_TYPE
	  /* Don't warn about opaque types, where the actual alignment
	     restriction is unknown.  */
	  && !(RECORD_OR_UNION_TYPE_P (TREE_TYPE (otype))
	       && TYPE_MODE (TREE_TYPE (otype)) == VOIDmode)
	  && min_align_of_type (TREE_TYPE (type))
	     > min_align_of_type (TREE_TYPE (otype)))
	warning_at (loc, OPT_Wcast_align,
		    "cast increases required alignment of target type");

      if (TREE_CODE (type) == INTEGER_TYPE
	  && TREE_CODE (otype) == POINTER_TYPE
	  && TYPE_PRECISION (type) != TYPE_PRECISION (otype))
	/* Unlike conversion of integers to pointers, where the
	   warning is disabled for converting constants because
	   of cases such as SIG_*, warn about converting constant
	   pointers to integers.  In some cases it may cause unwanted
	   sign extension, and a warning is appropriate.  */
	warning_at (loc, OPT_Wpointer_to_int_cast,
		    "cast from pointer to integer of different size");

      if (TREE_CODE (value) == CALL_EXPR
	  && TREE_CODE (type) != TREE_CODE (otype))
	warning_at (loc, OPT_Wbad_function_cast,
		    "cast from function call of type %qT "
		    "to non-matching type %qT", otype, type);

      if (TREE_CODE (type) == POINTER_TYPE
	  && TREE_CODE (otype) == INTEGER_TYPE
	  && TYPE_PRECISION (type) != TYPE_PRECISION (otype)
	  /* Don't warn about converting any constant.  */
	  && !TREE_CONSTANT (value))
	warning_at (loc,
		    OPT_Wint_to_pointer_cast, "cast to pointer from integer "
		    "of different size");

      /* Warn about likely strict-aliasing violations.  */
      if (warn_strict_aliasing <= 2)
	strict_aliasing_warning (EXPR_LOCATION (value), type, expr);

      /* If pedantic, warn for conversions between function and object
	 pointer types, except for converting a null pointer constant
	 to function pointer type.  */
      if (pedantic
	  && TREE_CODE (type) == POINTER_TYPE
	  && TREE_CODE (otype) == POINTER_TYPE
	  && TREE_CODE (TREE_TYPE (otype)) == FUNCTION_TYPE
	  && TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE)
	pedwarn (loc, OPT_Wpedantic, "ISO C forbids "
		 "conversion of function pointer to object pointer type");

      if (pedantic
	  && TREE_CODE (type) == POINTER_TYPE
	  && TREE_CODE (otype) == POINTER_TYPE
	  && TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE
	  && TREE_CODE (TREE_TYPE (otype)) != FUNCTION_TYPE
	  && !null_pointer_constant_p (value))
	pedwarn (loc, OPT_Wpedantic, "ISO C forbids "
		 "conversion of object pointer to function pointer type");

      /* Warn about function-pointer casts where the target type is not
	 call-compatible with the source type.  */
      if (TREE_CODE (type) == POINTER_TYPE
	  && TREE_CODE (otype) == POINTER_TYPE
	  && TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE
	  && TREE_CODE (TREE_TYPE (otype)) == FUNCTION_TYPE
	  && !c_safe_function_type_cast_p (TREE_TYPE (type),
					   TREE_TYPE (otype)))
	warning_at (loc, OPT_Wcast_function_type,
		    "cast between incompatible function types"
		    " from %qT to %qT", otype, type);

      ovalue = value;
      value = convert (type, value);

      /* Ignore any integer overflow caused by the cast.  */
      if (TREE_CODE (value) == INTEGER_CST && !FLOAT_TYPE_P (otype))
	{
	  if (CONSTANT_CLASS_P (ovalue) && TREE_OVERFLOW (ovalue))
	    {
	      if (!TREE_OVERFLOW (value))
		{
		  /* Avoid clobbering a shared constant.  */
		  value = copy_node (value);
		  TREE_OVERFLOW (value) = TREE_OVERFLOW (ovalue);
		}
	    }
	  else if (TREE_OVERFLOW (value))
	    /* Reset VALUE's overflow flags, ensuring constant sharing.  */
	    value = wide_int_to_tree (TREE_TYPE (value), wi::to_wide (value));
	}
    }

  /* Don't let a cast be an lvalue.  */
  if (lvalue_p (value))
    value = non_lvalue_loc (loc, value);

  /* Don't allow the results of casting to floating-point or complex
     types be confused with actual constants, or casts involving
     integer and pointer types other than direct integer-to-integer
     and integer-to-pointer be confused with integer constant
     expressions and null pointer constants.  */
  if (TREE_CODE (value) == REAL_CST
      || TREE_CODE (value) == COMPLEX_CST
      || (TREE_CODE (value) == INTEGER_CST
	  && !((TREE_CODE (expr) == INTEGER_CST
		&& INTEGRAL_TYPE_P (TREE_TYPE (expr)))
	       || TREE_CODE (expr) == REAL_CST
	       || TREE_CODE (expr) == COMPLEX_CST)))
    value = build1 (NOP_EXPR, type, value);

  /* If the expression has integer operands and so can occur in an
     unevaluated part of an integer constant expression, ensure the
     return value reflects this.  */
  if (int_operands
      && INTEGRAL_TYPE_P (type)
      && !EXPR_INT_CONST_OPERANDS (value))
    value = note_integer_operands (value);

  protected_set_expr_location (value, loc);
  return value;
}
/* Interpret a cast of expression EXPR to type TYPE.  LOC is the
   location of the open paren of the cast, or the position of the cast
   expr.  */

tree
c_cast_expr (location_t loc, struct c_type_name *type_name, tree expr)
{
  tree type_expr = NULL_TREE;
  bool type_expr_const = true;

  /* Grokking the type name may warn about unprototyped function types;
     suppress that for integer constant operands so that idioms such as
     "#define SIG_DFL (void(*)())0" stay quiet.  */
  int old_wsp = warn_strict_prototypes;
  if (TREE_CODE (expr) == INTEGER_CST)
    warn_strict_prototypes = 0;
  tree type = groktypename (type_name, &type_expr, &type_expr_const);
  warn_strict_prototypes = old_wsp;

  if (TREE_CODE (expr) == ADDR_EXPR
      && !VOID_TYPE_P (type)
      && reject_gcc_builtin (expr))
    return error_mark_node;

  tree result = build_c_cast (loc, type, expr);

  /* If the type name carried an expression of its own, sequence its
     evaluation before the cast result.  */
  if (type_expr)
    {
      bool inner_expr_const = true;
      result = c_fully_fold (result, require_constant_value,
			     &inner_expr_const);
      result = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (result), type_expr,
		       result);
      C_MAYBE_CONST_EXPR_NON_CONST (result)
	= !(type_expr_const && inner_expr_const);
      SET_EXPR_LOCATION (result, loc);
    }

  if (!EXPR_HAS_LOCATION (result))
    protected_set_expr_location (result, loc);

  /* C++ does not permit types to be defined in a cast, though it
     allows references to incomplete types.  */
  if (warn_cxx_compat && type_name->specs->typespec_kind == ctsk_tagdef)
    warning_at (loc, OPT_Wc___compat,
		"defining a type in a cast is invalid in C++");

  return result;
}
/* Build an assignment expression of lvalue LHS from value RHS.
   If LHS_ORIGTYPE is not NULL, it is the original type of LHS, which
   may differ from TREE_TYPE (LHS) for an enum bitfield.
   MODIFYCODE is the code for a binary operator that we use
   to combine the old value of LHS with RHS to get the new value.
   Or else MODIFYCODE is NOP_EXPR meaning do a simple assignment.
   If RHS_ORIGTYPE is not NULL_TREE, it is the original type of RHS,
   which may differ from TREE_TYPE (RHS) for an enum value.

   LOCATION is the location of the MODIFYCODE operator.
   RHS_LOC is the location of the RHS.
   Returns the assignment tree, or error_mark_node after a diagnostic.  */

tree
build_modify_expr (location_t location, tree lhs, tree lhs_origtype,
		   enum tree_code modifycode,
		   location_t rhs_loc, tree rhs, tree rhs_origtype)
{
  tree result;
  tree newrhs;
  tree rhseval = NULL_TREE;	/* Pre-evaluated RHS, if it had side-effects.  */
  tree lhstype = TREE_TYPE (lhs);
  tree olhstype = lhstype;	/* Nominal type for the returned expression.  */
  bool npc;
  bool is_atomic_op;

  /* Types that aren't fully specified cannot be used in assignments.  */
  lhs = require_complete_type (location, lhs);

  /* Avoid duplicate error messages from operands that had errors.  */
  if (TREE_CODE (lhs) == ERROR_MARK || TREE_CODE (rhs) == ERROR_MARK)
    return error_mark_node;

  /* Ensure an error for assigning a non-lvalue array to an array in
     C90.  */
  if (TREE_CODE (lhstype) == ARRAY_TYPE)
    {
      error_at (location, "assignment to expression with array type");
      return error_mark_node;
    }

  /* For ObjC properties, defer this check.  */
  if (!objc_is_property_ref (lhs) && !lvalue_or_else (location, lhs, lv_assign))
    return error_mark_node;

  is_atomic_op = really_atomic_lvalue (lhs);

  newrhs = rhs;

  if (TREE_CODE (lhs) == C_MAYBE_CONST_EXPR)
    {
      /* Recurse on the inner expression, then re-wrap so the
	 pre-evaluated part of LHS is still evaluated first.  */
      tree inner = build_modify_expr (location, C_MAYBE_CONST_EXPR_EXPR (lhs),
				      lhs_origtype, modifycode, rhs_loc, rhs,
				      rhs_origtype);
      if (inner == error_mark_node)
	return error_mark_node;
      result = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (inner),
		       C_MAYBE_CONST_EXPR_PRE (lhs), inner);
      gcc_assert (!C_MAYBE_CONST_EXPR_INT_OPERANDS (lhs));
      C_MAYBE_CONST_EXPR_NON_CONST (result) = 1;
      protected_set_expr_location (result, location);
      return result;
    }

  /* If a binary op has been requested, combine the old LHS value with the RHS
     producing the value we should actually store into the LHS.  */
  if (modifycode != NOP_EXPR)
    {
      lhs = c_fully_fold (lhs, false, NULL, true);
      lhs = stabilize_reference (lhs);

      /* Construct the RHS for any non-atomic compound assignment.  */
      if (!is_atomic_op)
	{
	  /* If in LHS op= RHS the RHS has side-effects, ensure they
	     are preevaluated before the rest of the assignment expression's
	     side-effects, because RHS could contain e.g. function calls
	     that modify LHS.  */
	  if (TREE_SIDE_EFFECTS (rhs))
	    {
	      if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR)
		newrhs = save_expr (TREE_OPERAND (rhs, 0));
	      else
		newrhs = save_expr (rhs);
	      rhseval = newrhs;
	      if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR)
		newrhs = build1 (EXCESS_PRECISION_EXPR, TREE_TYPE (rhs),
				 newrhs);
	    }
	  newrhs = build_binary_op (location,
				    modifycode, lhs, newrhs, true);

	  /* The original type of the right hand side is no longer
	     meaningful.  */
	  rhs_origtype = NULL_TREE;
	}
    }

  if (c_dialect_objc ())
    {
      /* Check if we are modifying an Objective-C property reference;
	 if so, we need to generate setter calls.  */
      if (TREE_CODE (newrhs) == EXCESS_PRECISION_EXPR)
	result = objc_maybe_build_modify_expr (lhs, TREE_OPERAND (newrhs, 0));
      else
	result = objc_maybe_build_modify_expr (lhs, newrhs);
      if (result)
	goto return_result;

      /* Else, do the check that we postponed for Objective-C.  */
      if (!lvalue_or_else (location, lhs, lv_assign))
	return error_mark_node;
    }

  /* Give an error for storing in something that is 'const'.  */
  if (TYPE_READONLY (lhstype)
      || (RECORD_OR_UNION_TYPE_P (lhstype)
	  && C_TYPE_FIELDS_READONLY (lhstype)))
    {
      readonly_error (location, lhs, lv_assign);
      return error_mark_node;
    }
  else if (TREE_READONLY (lhs))
    readonly_warning (lhs, lv_assign);

  /* If storing into a structure or union member,
     it has probably been given type `int'.
     Compute the type that would go with
     the actual amount of storage the member occupies.  */
  if (TREE_CODE (lhs) == COMPONENT_REF
      && (TREE_CODE (lhstype) == INTEGER_TYPE
	  || TREE_CODE (lhstype) == BOOLEAN_TYPE
	  || TREE_CODE (lhstype) == REAL_TYPE
	  || TREE_CODE (lhstype) == ENUMERAL_TYPE))
    lhstype = TREE_TYPE (get_unwidened (lhs, 0));

  /* If storing in a field that is in actuality a short or narrower than one,
     we must store in the field in its actual type.  */
  if (lhstype != TREE_TYPE (lhs))
    {
      lhs = copy_node (lhs);
      TREE_TYPE (lhs) = lhstype;
    }

  /* Issue -Wc++-compat warnings about an assignment to an enum type
     when LHS does not have its original type.  This happens for,
     e.g., an enum bitfield in a struct.  */
  if (warn_cxx_compat
      && lhs_origtype != NULL_TREE
      && lhs_origtype != lhstype
      && TREE_CODE (lhs_origtype) == ENUMERAL_TYPE)
    {
      tree checktype = (rhs_origtype != NULL_TREE
			? rhs_origtype
			: TREE_TYPE (rhs));
      if (checktype != error_mark_node
	  && (TYPE_MAIN_VARIANT (checktype) != TYPE_MAIN_VARIANT (lhs_origtype)
	      || (is_atomic_op && modifycode != NOP_EXPR)))
	warning_at (location, OPT_Wc___compat,
		    "enum conversion in assignment is invalid in C++");
    }

  /* If the lhs is atomic, remove that qualifier.  */
  if (is_atomic_op)
    {
      lhstype = build_qualified_type (lhstype,
				      (TYPE_QUALS (lhstype)
				       & ~TYPE_QUAL_ATOMIC));
      olhstype = build_qualified_type (olhstype,
				       (TYPE_QUALS (lhstype)
					& ~TYPE_QUAL_ATOMIC));
    }

  /* Convert new value to destination type.  Fold it first, then
     restore any excess precision information, for the sake of
     conversion warnings.  */
  if (!(is_atomic_op && modifycode != NOP_EXPR))
    {
      tree rhs_semantic_type = NULL_TREE;
      if (TREE_CODE (newrhs) == EXCESS_PRECISION_EXPR)
	{
	  rhs_semantic_type = TREE_TYPE (newrhs);
	  newrhs = TREE_OPERAND (newrhs, 0);
	}
      npc = null_pointer_constant_p (newrhs);
      newrhs = c_fully_fold (newrhs, false, NULL);
      if (rhs_semantic_type)
	newrhs = build1 (EXCESS_PRECISION_EXPR, rhs_semantic_type, newrhs);
      newrhs = convert_for_assignment (location, rhs_loc, lhstype, newrhs,
				       rhs_origtype, ic_assign, npc,
				       NULL_TREE, NULL_TREE, 0);
      if (TREE_CODE (newrhs) == ERROR_MARK)
	return error_mark_node;
    }

  /* Emit ObjC write barrier, if necessary.  */
  if (c_dialect_objc () && flag_objc_gc)
    {
      result = objc_generate_write_barrier (lhs, modifycode, newrhs);
      if (result)
	{
	  protected_set_expr_location (result, location);
	  goto return_result;
	}
    }

  /* Scan operands.  */
  if (is_atomic_op)
    result = build_atomic_assign (location, lhs, modifycode, newrhs, false);
  else
    {
      result = build2 (MODIFY_EXPR, lhstype, lhs, newrhs);
      TREE_SIDE_EFFECTS (result) = 1;
      protected_set_expr_location (result, location);
    }

  /* If we got the LHS in a different type for storing in,
     convert the result back to the nominal type of LHS
     so that the value we return always has the same type
     as the LHS argument.  */
  if (olhstype == TREE_TYPE (result))
    goto return_result;

  result = convert_for_assignment (location, rhs_loc, olhstype, result,
				   rhs_origtype, ic_assign, false, NULL_TREE,
				   NULL_TREE, 0);
  protected_set_expr_location (result, location);

return_result:
  /* If the RHS was pre-evaluated, sequence its side-effects before
     the assignment's own.  */
  if (rhseval)
    result = build2 (COMPOUND_EXPR, TREE_TYPE (result), rhseval, result);
  return result;
}
/* Return whether STRUCT_TYPE has an anonymous field with type TYPE.
   This is used to implement -fplan9-extensions.  Returns false when
   the match is ambiguous (found more than once).  */

static bool
find_anonymous_field_with_type (tree struct_type, tree type)
{
  gcc_assert (RECORD_OR_UNION_TYPE_P (struct_type));

  bool found = false;
  for (tree field = TYPE_FIELDS (struct_type);
       field != NULL_TREE;
       field = TREE_CHAIN (field))
    {
      /* Only anonymous members participate.  */
      if (DECL_NAME (field) != NULL)
	continue;

      tree fieldtype = (TYPE_ATOMIC (TREE_TYPE (field))
			? c_build_qualified_type (TREE_TYPE (field),
						  TYPE_QUAL_ATOMIC)
			: TYPE_MAIN_VARIANT (TREE_TYPE (field)));

      /* A field matches either directly or via a nested anonymous
	 struct/union member.  */
      bool matches
	= (comptypes (type, fieldtype)
	   || (RECORD_OR_UNION_TYPE_P (TREE_TYPE (field))
	       && find_anonymous_field_with_type (TREE_TYPE (field), type)));
      if (matches)
	{
	  /* A second match makes the lookup ambiguous.  */
	  if (found)
	    return false;
	  found = true;
	}
    }
  return found;
}
/* RHS is an expression whose type is pointer to struct.  If there is
   an anonymous field in RHS with type TYPE, then return a pointer to
   that field in RHS.  This is used with -fplan9-extensions.  This
   returns NULL if no conversion could be found.  */

static tree
convert_to_anonymous_field (location_t location, tree type, tree rhs)
{
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (rhs)));
  tree struct_type = TREE_TYPE (TREE_TYPE (rhs));
  gcc_assert (RECORD_OR_UNION_TYPE_P (struct_type));

  gcc_assert (POINTER_TYPE_P (type));
  tree target_type = (TYPE_ATOMIC (TREE_TYPE (type))
		      ? c_build_qualified_type (TREE_TYPE (type),
						TYPE_QUAL_ATOMIC)
		      : TYPE_MAIN_VARIANT (TREE_TYPE (type)));

  tree match = NULL_TREE;
  bool match_is_nested = false;

  for (tree field = TYPE_FIELDS (struct_type);
       field != NULL_TREE;
       field = TREE_CHAIN (field))
    {
      /* Only anonymous struct/union members are candidates.  */
      if (DECL_NAME (field) != NULL_TREE
	  || !RECORD_OR_UNION_TYPE_P (TREE_TYPE (field)))
	continue;

      tree fieldtype = (TYPE_ATOMIC (TREE_TYPE (field))
			? c_build_qualified_type (TREE_TYPE (field),
						  TYPE_QUAL_ATOMIC)
			: TYPE_MAIN_VARIANT (TREE_TYPE (field)));
      if (comptypes (target_type, fieldtype))
	{
	  /* Direct match; a second one makes the lookup ambiguous.  */
	  if (match != NULL_TREE)
	    return NULL_TREE;
	  match = field;
	  match_is_nested = false;
	}
      else if (find_anonymous_field_with_type (TREE_TYPE (field),
					       target_type))
	{
	  /* Match inside a nested anonymous member.  */
	  if (match != NULL_TREE)
	    return NULL_TREE;
	  match = field;
	  match_is_nested = true;
	}
    }

  if (match == NULL_TREE)
    return NULL_TREE;

  /* Build &RHS->MATCH, then recurse if the real match lies deeper in a
     nested anonymous member.  */
  tree ref = fold_build3_loc (location, COMPONENT_REF, TREE_TYPE (match),
			      build_fold_indirect_ref (rhs), match,
			      NULL_TREE);
  ref = build_fold_addr_expr_loc (location, ref);
  if (match_is_nested)
    {
      ref = convert_to_anonymous_field (location, type, ref);
      gcc_assert (ref != NULL_TREE);
    }
  return ref;
}
/* Issue an error message for a bad initializer component.
   GMSGID identifies the message.
   The component name is taken from the spelling stack.  */

static void ATTRIBUTE_GCC_DIAG (2,0)
error_init (location_t loc, const char *gmsgid, ...)
{
  auto_diagnostic_group dg;

  /* The gmsgid may be a format string with %< and %>.  */
  va_list args;
  va_start (args, gmsgid);
  bool emitted = emit_diagnostic_valist (DK_ERROR, loc, -1, gmsgid, &args);
  va_end (args);

  /* Follow up with the initializer context, if any.  */
  char *context = print_spelling ((char *) alloca (spelling_length () + 1));
  if (emitted && *context)
    inform (loc, "(near initialization for %qs)", context);
}
/* Issue a pedantic warning for a bad initializer component.  OPT is
   the option OPT_* (from options.h) controlling this warning or 0 if
   it is unconditionally given.  GMSGID identifies the message.  The
   component name is taken from the spelling stack.  */

static void ATTRIBUTE_GCC_DIAG (3,0)
pedwarn_init (location_t loc, int opt, const char *gmsgid, ...)
{
  /* Use the location where a macro was expanded rather than where
     it was defined to make sure macros defined in system headers
     but used incorrectly elsewhere are diagnosed.  */
  location_t exploc = expansion_point_location_if_in_system_header (loc);
  auto_diagnostic_group dg;

  va_list args;
  va_start (args, gmsgid);
  bool emitted
    = emit_diagnostic_valist (DK_PEDWARN, exploc, opt, gmsgid, &args);
  va_end (args);

  /* Follow up with the initializer context, if any.  */
  char *context = print_spelling ((char *) alloca (spelling_length () + 1));
  if (emitted && *context)
    inform (exploc, "(near initialization for %qs)", context);
}
/* Issue a warning for a bad initializer component.

   OPT is the OPT_W* value corresponding to the warning option that
   controls this warning.  GMSGID identifies the message.  The
   component name is taken from the spelling stack.  */

static void
warning_init (location_t loc, int opt, const char *gmsgid)
{
  auto_diagnostic_group dg;

  /* Use the location where a macro was expanded rather than where
     it was defined to make sure macros defined in system headers
     but used incorrectly elsewhere are diagnosed.  */
  location_t exploc = expansion_point_location_if_in_system_header (loc);

  /* The gmsgid may be a format string with %< and %>.  */
  bool emitted = warning_at (exploc, opt, gmsgid);

  /* Follow up with the initializer context, if any.  */
  char *context = print_spelling ((char *) alloca (spelling_length () + 1));
  if (emitted && *context)
    inform (exploc, "(near initialization for %qs)", context);
}
/* If TYPE is an array type and EXPR is a parenthesized string
   constant, warn if pedantic that EXPR is being used to initialize an
   object of type TYPE.  */

void
maybe_warn_string_init (location_t loc, tree type, struct c_expr expr)
{
  if (!pedantic)
    return;
  /* original_code != STRING_CST distinguishes "(\"abc\")" from a bare
     string literal.  */
  if (TREE_CODE (type) != ARRAY_TYPE
      || TREE_CODE (expr.value) != STRING_CST
      || expr.original_code == STRING_CST)
    return;
  pedwarn_init (loc, OPT_Wpedantic,
		"array initialized from parenthesized string constant");
}
/* Attempt to locate the parameter with the given index within FNDECL,
   returning DECL_SOURCE_LOCATION (FNDECL) if it can't be found.  */

static location_t
get_fndecl_argument_location (tree fndecl, int argnum)
{
  /* Walk ARGNUM links down DECL_ARGUMENTS (fndecl).  */
  tree param = DECL_ARGUMENTS (fndecl);
  for (int i = 0; i < argnum && param; i++)
    param = TREE_CHAIN (param);

  /* If something went wrong (e.g. if we have a builtin and thus no
     arguments), fall back to the function declaration itself.  */
  return (param != NULL
	  ? DECL_SOURCE_LOCATION (param)
	  : DECL_SOURCE_LOCATION (fndecl));
}
/* Issue a note about a mismatching argument for parameter PARMNUM
   to FUNDECL, for types EXPECTED_TYPE and ACTUAL_TYPE.
   Attempt to issue the note at the pertinent parameter of the decl;
   failing that issue it at the location of FUNDECL; failing that
   issue it at PLOC.  */

static void
inform_for_arg (tree fundecl, location_t ploc, int parmnum,
		tree expected_type, tree actual_type)
{
  /* Prefer the parameter's own location; builtins have none, so fall
     back to the call-site location PLOC.  */
  location_t loc = ploc;
  if (fundecl && !DECL_IS_BUILTIN (fundecl))
    loc = get_fndecl_argument_location (fundecl, parmnum - 1);

  inform (loc,
	  "expected %qT but argument is of type %qT",
	  expected_type, actual_type);
}
/* Issue a warning when an argument of ARGTYPE is passed to a built-in
   function FUNDECL declared without prototype to parameter PARMNUM of
   PARMTYPE when ARGTYPE does not promote to PARMTYPE.  */

static void
maybe_warn_builtin_no_proto_arg (location_t loc, tree fundecl, int parmnum,
				 tree parmtype, tree argtype)
{
  tree_code parmcode = TREE_CODE (parmtype);
  tree_code argcode = TREE_CODE (argtype);
  tree promoted = c_type_promotes_to (argtype);

  /* Avoid warning for enum arguments that promote to an integer type
     of the same size/mode.  */
  if (parmcode == INTEGER_TYPE
      && argcode == ENUMERAL_TYPE
      && TYPE_MODE (parmtype) == TYPE_MODE (argtype))
    return;

  /* Likewise when the promoted argument type matches the parameter
     type (treating enum like integer).  */
  bool codes_match = (parmcode == argcode
		      || (parmcode == INTEGER_TYPE
			  && argcode == ENUMERAL_TYPE));
  if (codes_match
      && TYPE_MAIN_VARIANT (parmtype) == TYPE_MAIN_VARIANT (promoted))
    return;

  /* This diagnoses even signed/unsigned mismatches.  Those might be
     safe in many cases but GCC may emit suboptimal code for them so
     warning on those cases drives efficiency improvements.  */
  const char *msg
    = (TYPE_MAIN_VARIANT (promoted) == argtype
       ? G_("%qD argument %d type is %qT where %qT is expected "
	    "in a call to built-in function declared without "
	    "prototype")
       : G_("%qD argument %d promotes to %qT where %qT is expected "
	    "in a call to built-in function declared without "
	    "prototype"));
  if (warning_at (loc, OPT_Wbuiltin_declaration_mismatch, msg,
		  fundecl, parmnum, promoted, parmtype))
    inform (DECL_SOURCE_LOCATION (fundecl),
	    "built-in %qD declared here",
	    fundecl);
}
/* Convert value RHS to type TYPE as preparation for an assignment to
an lvalue of type TYPE. If ORIGTYPE is not NULL_TREE, it is the
original type of RHS; this differs from TREE_TYPE (RHS) for enum
types. NULL_POINTER_CONSTANT says whether RHS was a null pointer
constant before any folding.
The real work of conversion is done by `convert'.
The purpose of this function is to generate error messages
for assignments that are not allowed in C.
ERRTYPE says whether it is argument passing, assignment,
initialization or return.
In the following example, '~' denotes where EXPR_LOC and '^' where
LOCATION point to:
f (var); [ic_argpass]
^ ~~~
x = var; [ic_assign]
^ ~~~;
int x = var; [ic_init]
^^^
return x; [ic_return]
^
FUNCTION is a tree for the function being called.
PARMNUM is the number of the argument, for printing in error messages.
WARNOPT may be set to a warning option to issue the corresponding warning
rather than an error for invalid conversions. Used for calls to built-in
functions declared without a prototype. */
static tree
convert_for_assignment (location_t location, location_t expr_loc, tree type,
tree rhs, tree origtype, enum impl_conv errtype,
bool null_pointer_constant, tree fundecl,
tree function, int parmnum, int warnopt /* = 0 */)
{
enum tree_code codel = TREE_CODE (type);
tree orig_rhs = rhs;
tree rhstype;
enum tree_code coder;
tree rname = NULL_TREE;
bool objc_ok = false;
/* Use the expansion point location to handle cases such as user's
function returning a wrong-type macro defined in a system header. */
location = expansion_point_location_if_in_system_header (location);
if (errtype == ic_argpass)
{
tree selector;
/* Change pointer to function to the function itself for
diagnostics. */
if (TREE_CODE (function) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL)
function = TREE_OPERAND (function, 0);
/* Handle an ObjC selector specially for diagnostics. */
selector = objc_message_selector ();
rname = function;
if (selector && parmnum > 2)
{
rname = selector;
parmnum -= 2;
}
}
/* This macro is used to emit diagnostics to ensure that all format
strings are complete sentences, visible to gettext and checked at
compile time. */
#define PEDWARN_FOR_ASSIGNMENT(LOCATION, PLOC, OPT, AR, AS, IN, RE) \
do { \
switch (errtype) \
{ \
case ic_argpass: \
{ \
auto_diagnostic_group d; \
if (pedwarn (PLOC, OPT, AR, parmnum, rname)) \
inform_for_arg (fundecl, (PLOC), parmnum, type, rhstype); \
} \
break; \
case ic_assign: \
pedwarn (LOCATION, OPT, AS); \
break; \
case ic_init: \
pedwarn_init (LOCATION, OPT, IN); \
break; \
case ic_return: \
pedwarn (LOCATION, OPT, RE); \
break; \
default: \
gcc_unreachable (); \
} \
} while (0)
/* This macro is used to emit diagnostics to ensure that all format
strings are complete sentences, visible to gettext and checked at
compile time. It is the same as PEDWARN_FOR_ASSIGNMENT but with an
extra parameter to enumerate qualifiers. */
#define PEDWARN_FOR_QUALIFIERS(LOCATION, PLOC, OPT, AR, AS, IN, RE, QUALS) \
do { \
switch (errtype) \
{ \
case ic_argpass: \
{ \
auto_diagnostic_group d; \
if (pedwarn (PLOC, OPT, AR, parmnum, rname, QUALS)) \
inform_for_arg (fundecl, (PLOC), parmnum, type, rhstype); \
} \
break; \
case ic_assign: \
pedwarn (LOCATION, OPT, AS, QUALS); \
break; \
case ic_init: \
pedwarn (LOCATION, OPT, IN, QUALS); \
break; \
case ic_return: \
pedwarn (LOCATION, OPT, RE, QUALS); \
break; \
default: \
gcc_unreachable (); \
} \
} while (0)
/* This macro is used to emit diagnostics to ensure that all format
strings are complete sentences, visible to gettext and checked at
compile time. It is the same as PEDWARN_FOR_QUALIFIERS but uses
warning_at instead of pedwarn. */
#define WARNING_FOR_QUALIFIERS(LOCATION, PLOC, OPT, AR, AS, IN, RE, QUALS) \
do { \
switch (errtype) \
{ \
case ic_argpass: \
{ \
auto_diagnostic_group d; \
if (warning_at (PLOC, OPT, AR, parmnum, rname, QUALS)) \
inform_for_arg (fundecl, (PLOC), parmnum, type, rhstype); \
} \
break; \
case ic_assign: \
warning_at (LOCATION, OPT, AS, QUALS); \
break; \
case ic_init: \
warning_at (LOCATION, OPT, IN, QUALS); \
break; \
case ic_return: \
warning_at (LOCATION, OPT, RE, QUALS); \
break; \
default: \
gcc_unreachable (); \
} \
} while (0)
if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR)
rhs = TREE_OPERAND (rhs, 0);
rhstype = TREE_TYPE (rhs);
coder = TREE_CODE (rhstype);
if (coder == ERROR_MARK)
return error_mark_node;
if (c_dialect_objc ())
{
int parmno;
switch (errtype)
{
case ic_return:
parmno = 0;
break;
case ic_assign:
parmno = -1;
break;
case ic_init:
parmno = -2;
break;
default:
parmno = parmnum;
break;
}
objc_ok = objc_compare_types (type, rhstype, parmno, rname);
}
if (warn_cxx_compat)
{
tree checktype = origtype != NULL_TREE ? origtype : rhstype;
if (checktype != error_mark_node
&& TREE_CODE (type) == ENUMERAL_TYPE
&& TYPE_MAIN_VARIANT (checktype) != TYPE_MAIN_VARIANT (type))
switch (errtype)
{
case ic_argpass:
if (pedwarn (expr_loc, OPT_Wc___compat, "enum conversion when "
"passing argument %d of %qE is invalid in C++",
parmnum, rname))
inform ((fundecl && !DECL_IS_BUILTIN (fundecl))
? DECL_SOURCE_LOCATION (fundecl) : expr_loc,
"expected %qT but argument is of type %qT",
type, rhstype);
break;
case ic_assign:
pedwarn (location, OPT_Wc___compat, "enum conversion from %qT to "
"%qT in assignment is invalid in C++", rhstype, type);
break;
case ic_init:
pedwarn_init (location, OPT_Wc___compat, "enum conversion from "
"%qT to %qT in initialization is invalid in C++",
rhstype, type);
break;
case ic_return:
pedwarn (location, OPT_Wc___compat, "enum conversion from %qT to "
"%qT in return is invalid in C++", rhstype, type);
break;
default:
gcc_unreachable ();
}
}
if (warn_enum_conversion)
{
tree checktype = origtype != NULL_TREE ? origtype : rhstype;
if (checktype != error_mark_node
&& TREE_CODE (checktype) == ENUMERAL_TYPE
&& TREE_CODE (type) == ENUMERAL_TYPE
&& TYPE_MAIN_VARIANT (checktype) != TYPE_MAIN_VARIANT (type))
{
gcc_rich_location loc (location);
warning_at (&loc, OPT_Wenum_conversion,
"implicit conversion from %qT to %qT",
checktype, type);
}
}
if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (rhstype))
{
warn_for_address_or_pointer_of_packed_member (type, orig_rhs);
return rhs;
}
if (coder == VOID_TYPE)
{
/* Except for passing an argument to an unprototyped function,
this is a constraint violation. When passing an argument to
an unprototyped function, it is compile-time undefined;
making it a constraint in that case was rejected in
DR#252. */
const char msg[] = "void value not ignored as it ought to be";
if (warnopt)
warning_at (location, warnopt, msg);
else
error_at (location, msg);
return error_mark_node;
}
rhs = require_complete_type (location, rhs);
if (rhs == error_mark_node)
return error_mark_node;
if (coder == POINTER_TYPE && reject_gcc_builtin (rhs))
return error_mark_node;
/* A non-reference type can convert to a reference. This handles
va_start, va_copy and possibly port built-ins. */
if (codel == REFERENCE_TYPE && coder != REFERENCE_TYPE)
{
if (!lvalue_p (rhs))
{
const char msg[] = "cannot pass rvalue to reference parameter";
if (warnopt)
warning_at (location, warnopt, msg);
else
error_at (location, msg);
return error_mark_node;
}
if (!c_mark_addressable (rhs))
return error_mark_node;
rhs = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (rhs)), rhs);
SET_EXPR_LOCATION (rhs, location);
rhs = convert_for_assignment (location, expr_loc,
build_pointer_type (TREE_TYPE (type)),
rhs, origtype, errtype,
null_pointer_constant, fundecl, function,
parmnum, warnopt);
if (rhs == error_mark_node)
return error_mark_node;
rhs = build1 (NOP_EXPR, type, rhs);
SET_EXPR_LOCATION (rhs, location);
return rhs;
}
/* Some types can interconvert without explicit casts. */
else if (codel == VECTOR_TYPE && coder == VECTOR_TYPE
&& vector_types_convertible_p (type, TREE_TYPE (rhs), true))
return convert (type, rhs);
/* Arithmetic types all interconvert, and enum is treated like int. */
else if ((codel == INTEGER_TYPE || codel == REAL_TYPE
|| codel == FIXED_POINT_TYPE
|| codel == ENUMERAL_TYPE || codel == COMPLEX_TYPE
|| codel == BOOLEAN_TYPE)
&& (coder == INTEGER_TYPE || coder == REAL_TYPE
|| coder == FIXED_POINT_TYPE
|| coder == ENUMERAL_TYPE || coder == COMPLEX_TYPE
|| coder == BOOLEAN_TYPE))
{
if (warnopt && errtype == ic_argpass)
maybe_warn_builtin_no_proto_arg (expr_loc, fundecl, parmnum, type,
rhstype);
bool save = in_late_binary_op;
if (codel == BOOLEAN_TYPE || codel == COMPLEX_TYPE
|| (coder == REAL_TYPE
&& (codel == INTEGER_TYPE || codel == ENUMERAL_TYPE)
&& sanitize_flags_p (SANITIZE_FLOAT_CAST)))
in_late_binary_op = true;
tree ret = convert_and_check (expr_loc != UNKNOWN_LOCATION
? expr_loc : location, type, orig_rhs);
in_late_binary_op = save;
return ret;
}
/* Aggregates in different TUs might need conversion. */
if ((codel == RECORD_TYPE || codel == UNION_TYPE)
&& codel == coder
&& comptypes (type, rhstype))
return convert_and_check (expr_loc != UNKNOWN_LOCATION
? expr_loc : location, type, rhs);
/* Conversion to a transparent union or record from its member types.
This applies only to function arguments. */
if (((codel == UNION_TYPE || codel == RECORD_TYPE)
&& TYPE_TRANSPARENT_AGGR (type))
&& errtype == ic_argpass)
{
tree memb, marginal_memb = NULL_TREE;
for (memb = TYPE_FIELDS (type); memb ; memb = DECL_CHAIN (memb))
{
tree memb_type = TREE_TYPE (memb);
if (comptypes (TYPE_MAIN_VARIANT (memb_type),
TYPE_MAIN_VARIANT (rhstype)))
break;
if (TREE_CODE (memb_type) != POINTER_TYPE)
continue;
if (coder == POINTER_TYPE)
{
tree ttl = TREE_TYPE (memb_type);
tree ttr = TREE_TYPE (rhstype);
/* Any non-function converts to a [const][volatile] void *
and vice versa; otherwise, targets must be the same.
Meanwhile, the lhs target must have all the qualifiers of
the rhs. */
if ((VOID_TYPE_P (ttl) && !TYPE_ATOMIC (ttl))
|| (VOID_TYPE_P (ttr) && !TYPE_ATOMIC (ttr))
|| comp_target_types (location, memb_type, rhstype))
{
int lquals = TYPE_QUALS (ttl) & ~TYPE_QUAL_ATOMIC;
int rquals = TYPE_QUALS (ttr) & ~TYPE_QUAL_ATOMIC;
/* If this type won't generate any warnings, use it. */
if (lquals == rquals
|| ((TREE_CODE (ttr) == FUNCTION_TYPE
&& TREE_CODE (ttl) == FUNCTION_TYPE)
? ((lquals | rquals) == rquals)
: ((lquals | rquals) == lquals)))
break;
/* Keep looking for a better type, but remember this one. */
if (!marginal_memb)
marginal_memb = memb;
}
}
/* Can convert integer zero to any pointer type. */
if (null_pointer_constant)
{
rhs = null_pointer_node;
break;
}
}
if (memb || marginal_memb)
{
if (!memb)
{
/* We have only a marginally acceptable member type;
it needs a warning. */
tree ttl = TREE_TYPE (TREE_TYPE (marginal_memb));
tree ttr = TREE_TYPE (rhstype);
/* Const and volatile mean something different for function
types, so the usual warnings are not appropriate. */
if (TREE_CODE (ttr) == FUNCTION_TYPE
&& TREE_CODE (ttl) == FUNCTION_TYPE)
{
/* Because const and volatile on functions are
restrictions that say the function will not do
certain things, it is okay to use a const or volatile
function where an ordinary one is wanted, but not
vice-versa. */
if (TYPE_QUALS_NO_ADDR_SPACE (ttl)
& ~TYPE_QUALS_NO_ADDR_SPACE (ttr))
PEDWARN_FOR_QUALIFIERS (location, expr_loc,
OPT_Wdiscarded_qualifiers,
G_("passing argument %d of %qE "
"makes %q#v qualified function "
"pointer from unqualified"),
G_("assignment makes %q#v qualified "
"function pointer from "
"unqualified"),
G_("initialization makes %q#v qualified "
"function pointer from "
"unqualified"),
G_("return makes %q#v qualified function "
"pointer from unqualified"),
TYPE_QUALS (ttl) & ~TYPE_QUALS (ttr));
}
else if (TYPE_QUALS_NO_ADDR_SPACE (ttr)
& ~TYPE_QUALS_NO_ADDR_SPACE (ttl))
PEDWARN_FOR_QUALIFIERS (location, expr_loc,
OPT_Wdiscarded_qualifiers,
G_("passing argument %d of %qE discards "
"%qv qualifier from pointer target type"),
G_("assignment discards %qv qualifier "
"from pointer target type"),
G_("initialization discards %qv qualifier "
"from pointer target type"),
G_("return discards %qv qualifier from "
"pointer target type"),
TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl));
memb = marginal_memb;
}
if (!fundecl || !DECL_IN_SYSTEM_HEADER (fundecl))
pedwarn (location, OPT_Wpedantic,
"ISO C prohibits argument conversion to union type");
rhs = fold_convert_loc (location, TREE_TYPE (memb), rhs);
return build_constructor_single (type, memb, rhs);
}
}
/* Conversions among pointers */
else if ((codel == POINTER_TYPE || codel == REFERENCE_TYPE)
&& (coder == codel))
{
/* If RHS refers to a built-in declared without a prototype
BLTIN is the declaration of the built-in with a prototype
and RHSTYPE is set to the actual type of the built-in. */
tree bltin;
rhstype = type_or_builtin_type (rhs, &bltin);
tree ttl = TREE_TYPE (type);
tree ttr = TREE_TYPE (rhstype);
tree mvl = ttl;
tree mvr = ttr;
bool is_opaque_pointer;
int target_cmp = 0; /* Cache comp_target_types () result. */
addr_space_t asl;
addr_space_t asr;
if (TREE_CODE (mvl) != ARRAY_TYPE)
mvl = (TYPE_ATOMIC (mvl)
? c_build_qualified_type (TYPE_MAIN_VARIANT (mvl),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (mvl));
if (TREE_CODE (mvr) != ARRAY_TYPE)
mvr = (TYPE_ATOMIC (mvr)
? c_build_qualified_type (TYPE_MAIN_VARIANT (mvr),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (mvr));
/* Opaque pointers are treated like void pointers. */
is_opaque_pointer = vector_targets_convertible_p (ttl, ttr);
/* The Plan 9 compiler permits a pointer to a struct to be
automatically converted into a pointer to an anonymous field
within the struct. */
if (flag_plan9_extensions
&& RECORD_OR_UNION_TYPE_P (mvl)
&& RECORD_OR_UNION_TYPE_P (mvr)
&& mvl != mvr)
{
tree new_rhs = convert_to_anonymous_field (location, type, rhs);
if (new_rhs != NULL_TREE)
{
rhs = new_rhs;
rhstype = TREE_TYPE (rhs);
coder = TREE_CODE (rhstype);
ttr = TREE_TYPE (rhstype);
mvr = TYPE_MAIN_VARIANT (ttr);
}
}
/* C++ does not allow the implicit conversion void* -> T*. However,
for the purpose of reducing the number of false positives, we
tolerate the special case of
int *p = NULL;
where NULL is typically defined in C to be '(void *) 0'. */
if (VOID_TYPE_P (ttr) && rhs != null_pointer_node && !VOID_TYPE_P (ttl))
warning_at (errtype == ic_argpass ? expr_loc : location,
OPT_Wc___compat,
"request for implicit conversion "
"from %qT to %qT not permitted in C++", rhstype, type);
/* See if the pointers point to incompatible address spaces. */
asl = TYPE_ADDR_SPACE (ttl);
asr = TYPE_ADDR_SPACE (ttr);
if (!null_pointer_constant_p (rhs)
&& asr != asl && !targetm.addr_space.subset_p (asr, asl))
{
switch (errtype)
{
case ic_argpass:
{
const char msg[] = G_("passing argument %d of %qE from "
"pointer to non-enclosed address space");
if (warnopt)
warning_at (expr_loc, warnopt, msg, parmnum, rname);
else
error_at (expr_loc, msg, parmnum, rname);
break;
}
case ic_assign:
{
const char msg[] = G_("assignment from pointer to "
"non-enclosed address space");
if (warnopt)
warning_at (location, warnopt, msg);
else
error_at (location, msg);
break;
}
case ic_init:
{
const char msg[] = G_("initialization from pointer to "
"non-enclosed address space");
if (warnopt)
warning_at (location, warnopt, msg);
else
error_at (location, msg);
break;
}
case ic_return:
{
const char msg[] = G_("return from pointer to "
"non-enclosed address space");
if (warnopt)
warning_at (location, warnopt, msg);
else
error_at (location, msg);
break;
}
default:
gcc_unreachable ();
}
return error_mark_node;
}
/* Check if the right-hand side has a format attribute but the
left-hand side doesn't. */
if (warn_suggest_attribute_format
&& check_missing_format_attribute (type, rhstype))
{
switch (errtype)
{
case ic_argpass:
warning_at (expr_loc, OPT_Wsuggest_attribute_format,
"argument %d of %qE might be "
"a candidate for a format attribute",
parmnum, rname);
break;
case ic_assign:
warning_at (location, OPT_Wsuggest_attribute_format,
"assignment left-hand side might be "
"a candidate for a format attribute");
break;
case ic_init:
warning_at (location, OPT_Wsuggest_attribute_format,
"initialization left-hand side might be "
"a candidate for a format attribute");
break;
case ic_return:
warning_at (location, OPT_Wsuggest_attribute_format,
"return type might be "
"a candidate for a format attribute");
break;
default:
gcc_unreachable ();
}
}
/* Any non-function converts to a [const][volatile] void *
and vice versa; otherwise, targets must be the same.
Meanwhile, the lhs target must have all the qualifiers of the rhs. */
if ((VOID_TYPE_P (ttl) && !TYPE_ATOMIC (ttl))
|| (VOID_TYPE_P (ttr) && !TYPE_ATOMIC (ttr))
|| (target_cmp = comp_target_types (location, type, rhstype))
|| is_opaque_pointer
|| ((c_common_unsigned_type (mvl)
== c_common_unsigned_type (mvr))
&& (c_common_signed_type (mvl)
== c_common_signed_type (mvr))
&& TYPE_ATOMIC (mvl) == TYPE_ATOMIC (mvr)))
{
/* Warn about loss of qualifers from pointers to arrays with
qualifiers on the element type. */
if (TREE_CODE (ttr) == ARRAY_TYPE)
{
ttr = strip_array_types (ttr);
ttl = strip_array_types (ttl);
if (TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (ttr)
& ~TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (ttl))
WARNING_FOR_QUALIFIERS (location, expr_loc,
OPT_Wdiscarded_array_qualifiers,
G_("passing argument %d of %qE discards "
"%qv qualifier from pointer target type"),
G_("assignment discards %qv qualifier "
"from pointer target type"),
G_("initialization discards %qv qualifier "
"from pointer target type"),
G_("return discards %qv qualifier from "
"pointer target type"),
TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl));
}
else if (pedantic
&& ((VOID_TYPE_P (ttl) && TREE_CODE (ttr) == FUNCTION_TYPE)
||
(VOID_TYPE_P (ttr)
&& !null_pointer_constant
&& TREE_CODE (ttl) == FUNCTION_TYPE)))
PEDWARN_FOR_ASSIGNMENT (location, expr_loc, OPT_Wpedantic,
G_("ISO C forbids passing argument %d of "
"%qE between function pointer "
"and %<void *%>"),
G_("ISO C forbids assignment between "
"function pointer and %<void *%>"),
G_("ISO C forbids initialization between "
"function pointer and %<void *%>"),
G_("ISO C forbids return between function "
"pointer and %<void *%>"));
/* Const and volatile mean something different for function types,
so the usual warnings are not appropriate. */
else if (TREE_CODE (ttr) != FUNCTION_TYPE
&& TREE_CODE (ttl) != FUNCTION_TYPE)
{
/* Don't warn about loss of qualifier for conversions from
qualified void* to pointers to arrays with corresponding
qualifier on the element type. */
if (!pedantic)
ttl = strip_array_types (ttl);
/* Assignments between atomic and non-atomic objects are OK. */
if (TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (ttr)
& ~TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (ttl))
{
PEDWARN_FOR_QUALIFIERS (location, expr_loc,
OPT_Wdiscarded_qualifiers,
G_("passing argument %d of %qE discards "
"%qv qualifier from pointer target type"),
G_("assignment discards %qv qualifier "
"from pointer target type"),
G_("initialization discards %qv qualifier "
"from pointer target type"),
G_("return discards %qv qualifier from "
"pointer target type"),
TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl));
}
/* If this is not a case of ignoring a mismatch in signedness,
no warning. */
else if (VOID_TYPE_P (ttl) || VOID_TYPE_P (ttr)
|| target_cmp)
;
/* If there is a mismatch, do warn. */
else if (warn_pointer_sign)
switch (errtype)
{
case ic_argpass:
{
auto_diagnostic_group d;
range_label_for_type_mismatch rhs_label (rhstype, type);
gcc_rich_location richloc (expr_loc, &rhs_label);
if (pedwarn (&richloc, OPT_Wpointer_sign,
"pointer targets in passing argument %d of "
"%qE differ in signedness", parmnum, rname))
inform_for_arg (fundecl, expr_loc, parmnum, type,
rhstype);
}
break;
case ic_assign:
pedwarn (location, OPT_Wpointer_sign,
"pointer targets in assignment from %qT to %qT "
"differ in signedness", rhstype, type);
break;
case ic_init:
pedwarn_init (location, OPT_Wpointer_sign,
"pointer targets in initialization of %qT "
"from %qT differ in signedness", type,
rhstype);
break;
case ic_return:
pedwarn (location, OPT_Wpointer_sign, "pointer targets in "
"returning %qT from a function with return type "
"%qT differ in signedness", rhstype, type);
break;
default:
gcc_unreachable ();
}
}
else if (TREE_CODE (ttl) == FUNCTION_TYPE
&& TREE_CODE (ttr) == FUNCTION_TYPE)
{
/* Because const and volatile on functions are restrictions
that say the function will not do certain things,
it is okay to use a const or volatile function
where an ordinary one is wanted, but not vice-versa. */
if (TYPE_QUALS_NO_ADDR_SPACE (ttl)
& ~TYPE_QUALS_NO_ADDR_SPACE (ttr))
PEDWARN_FOR_QUALIFIERS (location, expr_loc,
OPT_Wdiscarded_qualifiers,
G_("passing argument %d of %qE makes "
"%q#v qualified function pointer "
"from unqualified"),
G_("assignment makes %q#v qualified function "
"pointer from unqualified"),
G_("initialization makes %q#v qualified "
"function pointer from unqualified"),
G_("return makes %q#v qualified function "
"pointer from unqualified"),
TYPE_QUALS (ttl) & ~TYPE_QUALS (ttr));
}
}
/* Avoid warning about the volatile ObjC EH puts on decls. */
else if (!objc_ok)
{
switch (errtype)
{
case ic_argpass:
{
auto_diagnostic_group d;
range_label_for_type_mismatch rhs_label (rhstype, type);
gcc_rich_location richloc (expr_loc, &rhs_label);
if (pedwarn (&richloc, OPT_Wincompatible_pointer_types,
"passing argument %d of %qE from incompatible "
"pointer type", parmnum, rname))
inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype);
}
break;
case ic_assign:
if (bltin)
pedwarn (location, OPT_Wincompatible_pointer_types,
"assignment to %qT from pointer to "
"%qD with incompatible type %qT",
type, bltin, rhstype);
else
pedwarn (location, OPT_Wincompatible_pointer_types,
"assignment to %qT from incompatible pointer type %qT",
type, rhstype);
break;
case ic_init:
if (bltin)
pedwarn_init (location, OPT_Wincompatible_pointer_types,
"initialization of %qT from pointer to "
"%qD with incompatible type %qT",
type, bltin, rhstype);
else
pedwarn_init (location, OPT_Wincompatible_pointer_types,
"initialization of %qT from incompatible "
"pointer type %qT",
type, rhstype);
break;
case ic_return:
if (bltin)
pedwarn (location, OPT_Wincompatible_pointer_types,
"returning pointer to %qD of type %qT from "
"a function with incompatible type %qT",
bltin, rhstype, type);
else
pedwarn (location, OPT_Wincompatible_pointer_types,
"returning %qT from a function with incompatible "
"return type %qT", rhstype, type);
break;
default:
gcc_unreachable ();
}
}
/* If RHS isn't an address, check pointer or array of packed
struct or union. */
warn_for_address_or_pointer_of_packed_member (type, orig_rhs);
return convert (type, rhs);
}
else if (codel == POINTER_TYPE && coder == ARRAY_TYPE)
{
/* ??? This should not be an error when inlining calls to
unprototyped functions. */
const char msg[] = "invalid use of non-lvalue array";
if (warnopt)
warning_at (location, warnopt, msg);
else
error_at (location, msg);
return error_mark_node;
}
else if (codel == POINTER_TYPE && coder == INTEGER_TYPE)
{
/* An explicit constant 0 can convert to a pointer,
or one that results from arithmetic, even including
a cast to integer type. */
if (!null_pointer_constant)
switch (errtype)
{
case ic_argpass:
{
auto_diagnostic_group d;
range_label_for_type_mismatch rhs_label (rhstype, type);
gcc_rich_location richloc (expr_loc, &rhs_label);
if (pedwarn (&richloc, OPT_Wint_conversion,
"passing argument %d of %qE makes pointer from "
"integer without a cast", parmnum, rname))
inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype);
}
break;
case ic_assign:
pedwarn (location, OPT_Wint_conversion,
"assignment to %qT from %qT makes pointer from integer "
"without a cast", type, rhstype);
break;
case ic_init:
pedwarn_init (location, OPT_Wint_conversion,
"initialization of %qT from %qT makes pointer from "
"integer without a cast", type, rhstype);
break;
case ic_return:
pedwarn (location, OPT_Wint_conversion, "returning %qT from a "
"function with return type %qT makes pointer from "
"integer without a cast", rhstype, type);
break;
default:
gcc_unreachable ();
}
return convert (type, rhs);
}
else if (codel == INTEGER_TYPE && coder == POINTER_TYPE)
{
switch (errtype)
{
case ic_argpass:
{
auto_diagnostic_group d;
range_label_for_type_mismatch rhs_label (rhstype, type);
gcc_rich_location richloc (expr_loc, &rhs_label);
if (pedwarn (&richloc, OPT_Wint_conversion,
"passing argument %d of %qE makes integer from "
"pointer without a cast", parmnum, rname))
inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype);
}
break;
case ic_assign:
pedwarn (location, OPT_Wint_conversion,
"assignment to %qT from %qT makes integer from pointer "
"without a cast", type, rhstype);
break;
case ic_init:
pedwarn_init (location, OPT_Wint_conversion,
"initialization of %qT from %qT makes integer from "
"pointer without a cast", type, rhstype);
break;
case ic_return:
pedwarn (location, OPT_Wint_conversion, "returning %qT from a "
"function with return type %qT makes integer from "
"pointer without a cast", rhstype, type);
break;
default:
gcc_unreachable ();
}
return convert (type, rhs);
}
else if (codel == BOOLEAN_TYPE && coder == POINTER_TYPE)
{
tree ret;
bool save = in_late_binary_op;
in_late_binary_op = true;
ret = convert (type, rhs);
in_late_binary_op = save;
return ret;
}
switch (errtype)
{
case ic_argpass:
{
auto_diagnostic_group d;
range_label_for_type_mismatch rhs_label (rhstype, type);
gcc_rich_location richloc (expr_loc, &rhs_label);
const char msg[] = G_("incompatible type for argument %d of %qE");
if (warnopt)
warning_at (expr_loc, warnopt, msg, parmnum, rname);
else
error_at (&richloc, msg, parmnum, rname);
inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype);
}
break;
case ic_assign:
{
const char msg[]
= G_("incompatible types when assigning to type %qT from type %qT");
if (warnopt)
warning_at (expr_loc, 0, msg, type, rhstype);
else
error_at (expr_loc, msg, type, rhstype);
break;
}
case ic_init:
{
const char msg[]
= G_("incompatible types when initializing type %qT using type %qT");
if (warnopt)
warning_at (location, 0, msg, type, rhstype);
else
error_at (location, msg, type, rhstype);
break;
}
case ic_return:
{
const char msg[]
= G_("incompatible types when returning type %qT but %qT was expected");
if (warnopt)
warning_at (location, 0, msg, rhstype, type);
else
error_at (location, msg, rhstype, type);
break;
}
default:
gcc_unreachable ();
}
return error_mark_node;
}
/* If VALUE is a compound expr all of whose expressions are constant, then
   return its value.  Otherwise, return error_mark_node.

   This is for handling COMPOUND_EXPRs as initializer elements
   which is allowed with a warning when -pedantic is specified.  */

static tree
valid_compound_expr_initializer (tree value, tree endtype)
{
  /* Walk down the right-hand spine of nested COMPOUND_EXPRs
     iteratively; only the left operands need a recursive check.  */
  while (TREE_CODE (value) == COMPOUND_EXPR)
    {
      if (valid_compound_expr_initializer (TREE_OPERAND (value, 0), endtype)
	  == error_mark_node)
	return error_mark_node;
      value = TREE_OPERAND (value, 1);
    }

  /* A leaf expression is acceptable only if it is a valid constant
     initializer for ENDTYPE.  */
  if (!initializer_constant_valid_p (value, endtype))
    return error_mark_node;
  return value;
}
/* Perform appropriate conversions on the initial value of a variable,
   store it in the declaration DECL,
   and print any error messages that are appropriate.
   If ORIGTYPE is not NULL_TREE, it is the original type of INIT.
   If the init is invalid, store an ERROR_MARK.
   INIT_LOC is the location of the initial value.  */

void
store_init_value (location_t init_loc, tree decl, tree init, tree origtype)
{
  tree value, type;
  bool npc = false;

  /* If variable's type was invalidly declared, just ignore it.  */
  type = TREE_TYPE (decl);
  if (TREE_CODE (type) == ERROR_MARK)
    return;

  /* Digest the specified initializer into an expression.  */
  if (init)
    npc = null_pointer_constant_p (init);
  /* Constant-ness is required when DECL has static storage duration.  */
  value = digest_init (init_loc, type, init, origtype, npc,
		       true, TREE_STATIC (decl));

  /* Store the expression if valid; else report error.  */
  if (!in_system_header_at (input_location)
      && AGGREGATE_TYPE_P (TREE_TYPE (decl)) && !TREE_STATIC (decl))
    warning (OPT_Wtraditional, "traditional C rejects automatic "
	     "aggregate initialization");

  /* An erroneous initializer for a FUNCTION_DECL is dropped; anything
     else (including error_mark_node for other decls) is recorded.  */
  if (value != error_mark_node || TREE_CODE (decl) != FUNCTION_DECL)
    DECL_INITIAL (decl) = value;

  /* ANSI wants warnings about out-of-range constant initializers.  */
  STRIP_TYPE_NOPS (value);
  if (TREE_STATIC (decl))
    constant_expression_warning (value);

  /* Check if we need to set array size from compound literal size.  */
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_DOMAIN (type) == NULL_TREE
      && value != error_mark_node)
    {
      tree inside_init = init;
      STRIP_TYPE_NOPS (inside_init);
      inside_init = fold (inside_init);

      if (TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR)
	{
	  tree cldecl = COMPOUND_LITERAL_EXPR_DECL (inside_init);

	  if (TYPE_DOMAIN (TREE_TYPE (cldecl)))
	    {
	      /* For int foo[] = (int [3]){1}; we need to set array size
		 now since later on array initializer will be just the
		 brace enclosed list of the compound literal.  */
	      tree etype = strip_array_types (TREE_TYPE (decl));
	      type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
	      TYPE_DOMAIN (type) = TYPE_DOMAIN (TREE_TYPE (cldecl));
	      layout_type (type);
	      layout_decl (cldecl, 0);
	      /* Reapply the original element qualifiers to the newly
		 sized array type before installing it on DECL.  */
	      TREE_TYPE (decl)
		= c_build_qualified_type (type, TYPE_QUALS (etype));
	    }
	}
    }
}
/* Methods for storing and printing names for error messages.  */

/* Implement a spelling stack that allows components of a name to be pushed
   and popped.  Each element on the stack is this structure.  */

struct spelling
{
  int kind;			/* One of the SPELLING_* codes below.  */
  union
    {
      unsigned HOST_WIDE_INT i;	/* Array index, for SPELLING_BOUNDS.  */
      const char *s;		/* Name text, for the other two kinds.  */
    } u;
};

#define SPELLING_STRING 1	/* Printed literally.  */
#define SPELLING_MEMBER 2	/* Printed as ".name".  */
#define SPELLING_BOUNDS 3	/* Printed as "[index]".  */

static struct spelling *spelling;	/* Next stack element (unused).  */
static struct spelling *spelling_base;	/* Spelling stack base.  */
static int spelling_size;		/* Size of the spelling stack.  */

/* Macros to save and restore the spelling stack around push_... functions.
   Alternative to SAVE_SPELLING_STACK.  */

/* Current number of elements on the spelling stack.  */
#define SPELLING_DEPTH() (spelling - spelling_base)
/* Reset the stack pointer to DEPTH elements above the base.  */
#define RESTORE_SPELLING_DEPTH(DEPTH) (spelling = spelling_base + (DEPTH))

/* Push an element on the spelling stack with type KIND and assign VALUE
   to MEMBER.  The stack grows 10 elements at a time; since XRESIZEVEC
   may move the storage, the depth is saved before reallocating and the
   stack pointer recomputed from the (possibly new) base afterwards.  */

#define PUSH_SPELLING(KIND, VALUE, MEMBER)				\
{									\
  int depth = SPELLING_DEPTH ();					\
									\
  if (depth >= spelling_size)						\
    {									\
      spelling_size += 10;						\
      spelling_base = XRESIZEVEC (struct spelling, spelling_base,	\
				  spelling_size);			\
      RESTORE_SPELLING_DEPTH (depth);					\
    }									\
									\
  spelling->kind = (KIND);						\
  spelling->MEMBER = (VALUE);						\
  spelling++;								\
}
/* Push STRING on the stack.  Printed literally.
   The string is not copied; the caller must keep it live while it
   remains on the spelling stack.  */

static void
push_string (const char *string)
{
  PUSH_SPELLING (SPELLING_STRING, string, u.s);
}
/* Push a member name on the stack.  Printed as '.' STRING.
   An unnamed member (e.g. an anonymous struct/union field) is shown
   as the translated placeholder "<anonymous>".  */

static void
push_member_name (tree decl)
{
  const char *const string
    = (DECL_NAME (decl)
       ? identifier_to_locale (IDENTIFIER_POINTER (DECL_NAME (decl)))
       : _("<anonymous>"));
  PUSH_SPELLING (SPELLING_MEMBER, string, u.s);
}
/* Push an array bounds on the stack.  Printed as [BOUNDS].  */

static void
push_array_bounds (unsigned HOST_WIDE_INT bounds)
{
  PUSH_SPELLING (SPELLING_BOUNDS, bounds, u.i);
}
/* Compute the maximum size in bytes of the printed spelling.  */

static int
spelling_length (void)
{
  int total = 0;

  for (struct spelling *q = spelling_base; q < spelling; q++)
    {
      if (q->kind == SPELLING_BOUNDS)
	/* Reserve a fixed 25 bytes for the brackets and digits of an
	   array bound (matches what print_spelling may emit).  */
	total += 25;
      else
	/* The component's text plus one byte for a possible '.'
	   separator (or the final NUL).  */
	total += strlen (q->u.s) + 1;
    }

  return total;
}
/* Print the spelling to BUFFER and return it.  BUFFER must be large
   enough (see spelling_length).  */

static char *
print_spelling (char *buffer)
{
  char *out = buffer;

  for (struct spelling *q = spelling_base; q < spelling; q++)
    {
      if (q->kind == SPELLING_BOUNDS)
	{
	  /* Array bound: "[<index>]".  */
	  sprintf (out, "[" HOST_WIDE_INT_PRINT_UNSIGNED "]", q->u.i);
	  out += strlen (out);
	}
      else
	{
	  /* Member names get a '.' prefix; plain strings do not.  */
	  if (q->kind == SPELLING_MEMBER)
	    *out++ = '.';
	  size_t len = strlen (q->u.s);
	  memcpy (out, q->u.s, len + 1);
	  out += len;
	}
    }

  *out++ = '\0';
  return buffer;
}
/* Digest the parser output INIT as an initializer for type TYPE.
   Return a C expression of type TYPE to represent the initial value.

   If ORIGTYPE is not NULL_TREE, it is the original type of INIT.

   NULL_POINTER_CONSTANT is true if INIT is a null pointer constant.

   If INIT is a string constant, STRICT_STRING is true if it is
   unparenthesized or we should not warn here for it being parenthesized.
   For other types of INIT, STRICT_STRING is not used.

   INIT_LOC is the location of the INIT.

   REQUIRE_CONSTANT requests an error if non-constant initializers or
   elements are seen.  */

static tree
digest_init (location_t init_loc, tree type, tree init, tree origtype,
	     bool null_pointer_constant, bool strict_string,
	     int require_constant)
{
  enum tree_code code = TREE_CODE (type);
  tree inside_init = init;
  tree semantic_type = NULL_TREE;
  bool maybe_const = true;

  if (type == error_mark_node
      || !init
      || error_operand_p (init))
    return error_mark_node;

  STRIP_TYPE_NOPS (inside_init);

  /* Strip an EXCESS_PRECISION_EXPR but remember its semantic type so
     it can be reinstated before the final scalar conversion below.  */
  if (TREE_CODE (inside_init) == EXCESS_PRECISION_EXPR)
    {
      semantic_type = TREE_TYPE (inside_init);
      inside_init = TREE_OPERAND (inside_init, 0);
    }
  inside_init = c_fully_fold (inside_init, require_constant, &maybe_const);

  /* Initialization of an array of chars from a string constant
     optionally enclosed in braces.  */
  if (code == ARRAY_TYPE && inside_init
      && TREE_CODE (inside_init) == STRING_CST)
    {
      /* Element type of the target array, with atomic qualification
	 preserved on the main variant.  */
      tree typ1
	= (TYPE_ATOMIC (TREE_TYPE (type))
	   ? c_build_qualified_type (TYPE_MAIN_VARIANT (TREE_TYPE (type)),
				     TYPE_QUAL_ATOMIC)
	   : TYPE_MAIN_VARIANT (TREE_TYPE (type)));
      /* Note that an array could be both an array of character type
	 and an array of wchar_t if wchar_t is signed char or unsigned
	 char.  */
      bool char_array = (typ1 == char_type_node
			 || typ1 == signed_char_type_node
			 || typ1 == unsigned_char_type_node);
      bool wchar_array = !!comptypes (typ1, wchar_type_node);
      bool char16_array = !!comptypes (typ1, char16_type_node);
      bool char32_array = !!comptypes (typ1, char32_type_node);

      if (char_array || wchar_array || char16_array || char32_array)
	{
	  struct c_expr expr;
	  /* Element type of the string constant itself.  */
	  tree typ2 = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (inside_init)));
	  bool incompat_string_cst = false;
	  expr.value = inside_init;
	  expr.original_code = (strict_string ? STRING_CST : ERROR_MARK);
	  expr.original_type = NULL;
	  maybe_warn_string_init (init_loc, type, expr);

	  if (TYPE_DOMAIN (type) && !TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
	    pedwarn_init (init_loc, OPT_Wpedantic,
			  "initialization of a flexible array member");

	  if (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
			 TYPE_MAIN_VARIANT (type)))
	    return inside_init;

	  /* A plain char array accepts only a plain char string literal;
	     wide/char16/char32 arrays require a matching literal type.  */
	  if (char_array)
	    {
	      if (typ2 != char_type_node)
		incompat_string_cst = true;
	    }
	  else if (!comptypes (typ1, typ2))
	    incompat_string_cst = true;

	  if (incompat_string_cst)
	    {
	      error_init (init_loc, "cannot initialize array of %qT from "
			  "a string literal with type array of %qT",
			  typ1, typ2);
	      return error_mark_node;
	    }

	  if (TYPE_DOMAIN (type) != NULL_TREE
	      && TYPE_SIZE (type) != NULL_TREE
	      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
	    {
	      unsigned HOST_WIDE_INT len = TREE_STRING_LENGTH (inside_init);
	      /* Bytes per (possibly wide) character of the literal.  */
	      unsigned unit = TYPE_PRECISION (typ1) / BITS_PER_UNIT;

	      /* Subtract the size of a single (possibly wide) character
		 because it's ok to ignore the terminating null char
		 that is counted in the length of the constant.  */
	      if (compare_tree_int (TYPE_SIZE_UNIT (type), len - unit) < 0)
		pedwarn_init (init_loc, 0,
			      ("initializer-string for array of %qT "
			       "is too long"), typ1);
	      else if (warn_cxx_compat
		       && compare_tree_int (TYPE_SIZE_UNIT (type), len) < 0)
		warning_at (init_loc, OPT_Wc___compat,
			    ("initializer-string for array of %qT "
			     "is too long for C++"), typ1);
	      /* Truncate the string constant to the array size,
		 discarding characters (including the NUL) that do
		 not fit.  */
	      if (compare_tree_int (TYPE_SIZE_UNIT (type), len) < 0)
		{
		  unsigned HOST_WIDE_INT size
		    = tree_to_uhwi (TYPE_SIZE_UNIT (type));
		  const char *p = TREE_STRING_POINTER (inside_init);

		  inside_init = build_string (size, p);
		}
	    }

	  TREE_TYPE (inside_init) = type;
	  return inside_init;
	}
      else if (INTEGRAL_TYPE_P (typ1))
	{
	  error_init (init_loc, "array of inappropriate type initialized "
		      "from string constant");
	  return error_mark_node;
	}
    }

  /* Build a VECTOR_CST from a *constant* vector constructor.  If the
     vector constructor is not constant (e.g. {1,2,3,foo()}) then punt
     below and handle as a constructor.  */
  if (code == VECTOR_TYPE
      && VECTOR_TYPE_P (TREE_TYPE (inside_init))
      && vector_types_convertible_p (TREE_TYPE (inside_init), type, true)
      && TREE_CONSTANT (inside_init))
    {
      if (TREE_CODE (inside_init) == VECTOR_CST
	  && comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
			TYPE_MAIN_VARIANT (type)))
	return inside_init;

      if (TREE_CODE (inside_init) == CONSTRUCTOR)
	{
	  unsigned HOST_WIDE_INT ix;
	  tree value;
	  bool constant_p = true;

	  /* Iterate through elements and check if all constructor
	     elements are *_CSTs.  */
	  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (inside_init), ix, value)
	    if (!CONSTANT_CLASS_P (value))
	      {
		constant_p = false;
		break;
	      }

	  if (constant_p)
	    return build_vector_from_ctor (type,
					   CONSTRUCTOR_ELTS (inside_init));
	}
    }

  if (warn_sequence_point)
    verify_sequence_points (inside_init);

  /* Any type can be initialized
     from an expression of the same type, optionally with braces.  */
  if (inside_init && TREE_TYPE (inside_init) != NULL_TREE
      && (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
		     TYPE_MAIN_VARIANT (type))
	  || (code == ARRAY_TYPE
	      && comptypes (TREE_TYPE (inside_init), type))
	  || (gnu_vector_type_p (type)
	      && comptypes (TREE_TYPE (inside_init), type))
	  || (code == POINTER_TYPE
	      && TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE
	      && comptypes (TREE_TYPE (TREE_TYPE (inside_init)),
			    TREE_TYPE (type)))))
    {
      /* An array initializing a pointer decays, but only for a string
	 constant or a compound literal; other array rvalues are
	 rejected.  */
      if (code == POINTER_TYPE)
	{
	  if (TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE)
	    {
	      if (TREE_CODE (inside_init) == STRING_CST
		  || TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR)
		inside_init = array_to_pointer_conversion
		  (init_loc, inside_init);
	      else
		{
		  error_init (init_loc, "invalid use of non-lvalue array");
		  return error_mark_node;
		}
	    }
	}

      if (code == VECTOR_TYPE)
	/* Although the types are compatible, we may require a
	   conversion.  */
	inside_init = convert (type, inside_init);

      if (require_constant
	  && TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR)
	{
	  /* As an extension, allow initializing objects with static storage
	     duration with compound literals (which are then treated just as
	     the brace enclosed list they contain).  Also allow this for
	     vectors, as we can only assign them with compound literals.  */
	  if (flag_isoc99 && code != VECTOR_TYPE)
	    pedwarn_init (init_loc, OPT_Wpedantic, "initializer element "
			  "is not constant");
	  tree decl = COMPOUND_LITERAL_EXPR_DECL (inside_init);
	  inside_init = DECL_INITIAL (decl);
	}

      if (code == ARRAY_TYPE && TREE_CODE (inside_init) != STRING_CST
	  && TREE_CODE (inside_init) != CONSTRUCTOR)
	{
	  error_init (init_loc, "array initialized from non-constant array "
		      "expression");
	  return error_mark_node;
	}

      /* Compound expressions can only occur here if -Wpedantic or
	 -pedantic-errors is specified.  In the later case, we always want
	 an error.  In the former case, we simply want a warning.  */
      if (require_constant && pedantic
	  && TREE_CODE (inside_init) == COMPOUND_EXPR)
	{
	  inside_init
	    = valid_compound_expr_initializer (inside_init,
					       TREE_TYPE (inside_init));
	  if (inside_init == error_mark_node)
	    error_init (init_loc, "initializer element is not constant");
	  else
	    pedwarn_init (init_loc, OPT_Wpedantic,
			  "initializer element is not constant");
	  if (flag_pedantic_errors)
	    inside_init = error_mark_node;
	}
      else if (require_constant
	       && !initializer_constant_valid_p (inside_init,
						 TREE_TYPE (inside_init)))
	{
	  error_init (init_loc, "initializer element is not constant");
	  inside_init = error_mark_node;
	}
      else if (require_constant && !maybe_const)
	pedwarn_init (init_loc, OPT_Wpedantic,
		      "initializer element is not a constant expression");

      /* Added to enable additional -Wsuggest-attribute=format warnings.  */
      if (TREE_CODE (TREE_TYPE (inside_init)) == POINTER_TYPE)
	inside_init = convert_for_assignment (init_loc, UNKNOWN_LOCATION,
					      type, inside_init, origtype,
					      ic_init, null_pointer_constant,
					      NULL_TREE, NULL_TREE, 0);
      return inside_init;
    }

  /* Handle scalar types, including conversions.  */
  if (code == INTEGER_TYPE || code == REAL_TYPE || code == FIXED_POINT_TYPE
      || code == POINTER_TYPE || code == ENUMERAL_TYPE || code == BOOLEAN_TYPE
      || code == COMPLEX_TYPE || code == VECTOR_TYPE)
    {
      if (TREE_CODE (TREE_TYPE (init)) == ARRAY_TYPE
	  && (TREE_CODE (init) == STRING_CST
	      || TREE_CODE (init) == COMPOUND_LITERAL_EXPR))
	inside_init = init = array_to_pointer_conversion (init_loc, init);
      /* Reinstate the semantic type stripped from an
	 EXCESS_PRECISION_EXPR at the top of the function.  */
      if (semantic_type)
	inside_init = build1 (EXCESS_PRECISION_EXPR, semantic_type,
			      inside_init);
      inside_init
	= convert_for_assignment (init_loc, UNKNOWN_LOCATION, type,
				  inside_init, origtype, ic_init,
				  null_pointer_constant, NULL_TREE, NULL_TREE,
				  0);

      /* Check to see if we have already given an error message.  */
      if (inside_init == error_mark_node)
	;
      else if (require_constant && !TREE_CONSTANT (inside_init))
	{
	  error_init (init_loc, "initializer element is not constant");
	  inside_init = error_mark_node;
	}
      else if (require_constant
	       && !initializer_constant_valid_p (inside_init,
						 TREE_TYPE (inside_init)))
	{
	  error_init (init_loc, "initializer element is not computable at "
		      "load time");
	  inside_init = error_mark_node;
	}
      else if (require_constant && !maybe_const)
	pedwarn_init (init_loc, OPT_Wpedantic,
		      "initializer element is not a constant expression");

      return inside_init;
    }

  /* Come here only for records and arrays.  */
  if (COMPLETE_TYPE_P (type) && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    {
      error_init (init_loc, "variable-sized object may not be initialized");
      return error_mark_node;
    }

  error_init (init_loc, "invalid initializer");
  return error_mark_node;
}
/* Handle initializers that use braces.  */

/* File-scope state shared by the brace-initializer machinery below.
   These variables describe the constructor currently being built and
   are saved/restored around nested braced initializers.  */

/* Type of object we are accumulating a constructor for.
   This type is always a RECORD_TYPE, UNION_TYPE or ARRAY_TYPE.  */
static tree constructor_type;

/* For a RECORD_TYPE or UNION_TYPE, this is the chain of fields
   left to fill.  */
static tree constructor_fields;

/* For an ARRAY_TYPE, this is the specified index
   at which to store the next element we get.  */
static tree constructor_index;

/* For an ARRAY_TYPE, this is the maximum index.  */
static tree constructor_max_index;

/* For a RECORD_TYPE, this is the first field not yet written out.  */
static tree constructor_unfilled_fields;

/* For an ARRAY_TYPE, this is the index of the first element
   not yet written out.  */
static tree constructor_unfilled_index;

/* In a RECORD_TYPE, the byte index of the next consecutive field.
   This is so we can generate gaps between fields, when appropriate.  */
static tree constructor_bit_index;

/* If we are saving up the elements rather than allocating them,
   this is the list of elements so far (in reverse order,
   most recent first).  */
static vec<constructor_elt, va_gc> *constructor_elements;

/* 1 if constructor should be incrementally stored into a constructor chain,
   0 if all the elements should be kept in AVL tree.  */
static int constructor_incremental;

/* 1 if so far this constructor's elements are all compile-time constants.  */
static int constructor_constant;

/* 1 if so far this constructor's elements are all valid address constants.  */
static int constructor_simple;

/* 1 if this constructor has an element that cannot be part of a
   constant expression.  */
static int constructor_nonconst;

/* 1 if this constructor is erroneous so far.  */
static int constructor_erroneous;

/* 1 if this constructor is the universal zero initializer { 0 }.  */
static int constructor_zeroinit;
/* Structure for managing pending initializer elements, organized as an
AVL tree. */
struct init_node
{
struct init_node *left, *right;
struct init_node *parent;
int balance;
tree purpose;
tree value;
tree origtype;
};
/* Tree of pending elements at this constructor level.
These are elements encountered out of order
which belong at places we haven't reached yet in actually
writing the output.
Will never hold tree nodes across GC runs. */
static struct init_node *constructor_pending_elts;
/* The SPELLING_DEPTH of this constructor. */
static int constructor_depth;
/* DECL node for which an initializer is being read.
0 means we are reading a constructor expression
such as (struct foo) {...}. */
static tree constructor_decl;
/* Nonzero if this is an initializer for a top-level decl. */
static int constructor_top_level;
/* Nonzero if there were any member designators in this initializer. */
static int constructor_designated;
/* Nesting depth of designator list. */
static int designator_depth;
/* Nonzero if there were diagnosed errors in this designator list. */
static int designator_erroneous;
/* This stack has a level for each implicit or explicit level of
   structuring in the initializer, including the outermost one.  It
   saves the values of most of the variables above.  */

struct constructor_range_stack;

struct constructor_stack
{
  struct constructor_stack *next;	/* Enclosing (outer) level.  */
  tree type;				/* Saved constructor_type.  */
  tree fields;				/* Saved constructor_fields.  */
  tree index;				/* Saved constructor_index.  */
  tree max_index;			/* Saved constructor_max_index.  */
  tree unfilled_index;			/* Saved constructor_unfilled_index.  */
  tree unfilled_fields;			/* Saved constructor_unfilled_fields.  */
  tree bit_index;			/* Saved constructor_bit_index.  */
  vec<constructor_elt, va_gc> *elements; /* Saved constructor_elements.  */
  struct init_node *pending_elts;	/* Saved constructor_pending_elts.  */
  int offset;
  int depth;				/* Saved constructor_depth.  */
  /* If value nonzero, this value should replace the entire
     constructor at this level.  */
  struct c_expr replacement_value;
  struct constructor_range_stack *range_stack; /* Saved range designators.  */
  char constant;			/* Saved constructor_constant.  */
  char simple;				/* Saved constructor_simple.  */
  char nonconst;			/* Saved constructor_nonconst.  */
  char implicit;			/* Nonzero if this level was opened
					   without an explicit brace (2 when
					   pushed for a designator list).  */
  char erroneous;			/* Saved constructor_erroneous.  */
  char outer;
  char incremental;			/* Saved constructor_incremental.  */
  char designated;			/* Saved constructor_designated.  */
  int designator_depth;			/* Saved designator_depth.  */
};

static struct constructor_stack *constructor_stack;
/* This stack represents designators from some range designator up to
   the last designator in the list.  */

struct constructor_range_stack
{
  struct constructor_range_stack *next, *prev; /* Doubly-linked list.  */
  struct constructor_stack *stack;	/* Constructor level this designator
					   belongs to.  */
  tree range_start;			/* First index of the range.  */
  tree index;				/* Index currently being initialized.  */
  tree range_end;			/* Last index of the range, or
					   NULL_TREE for a single designator.  */
  tree fields;				/* Field designator at this level.  */
};

static struct constructor_range_stack *constructor_range_stack;
/* This stack records separate initializers that are nested.
   Nested initializers can't happen in ANSI C, but GNU C allows them
   in cases like { ... (struct foo) { ... } ... }.  */

struct initializer_stack
{
  struct initializer_stack *next;	/* Enclosing initializer, if any.  */
  tree decl;				/* Saved constructor_decl.  */
  struct constructor_stack *constructor_stack; /* Saved constructor stack.  */
  struct constructor_range_stack *constructor_range_stack; /* Saved ranges.  */
  vec<constructor_elt, va_gc> *elements; /* Saved constructor_elements.  */
  struct spelling *spelling;		/* Saved error-spelling cursor.  */
  struct spelling *spelling_base;	/* Saved spelling buffer base.  */
  int spelling_size;			/* Saved spelling buffer size.  */
  char top_level;			/* Saved constructor_top_level.  */
  char require_constant_value;		/* Saved require_constant_value.  */
  char require_constant_elements;	/* Saved require_constant_elements.  */
  rich_location *missing_brace_richloc; /* Collects fix-it hints for missing
					   braces, or null.  */
};

static struct initializer_stack *initializer_stack;
/* Prepare to parse and output the initializer for variable DECL.
   ASMSPEC_TREE is unused.  TOP_LEVEL is nonzero for a file-scope decl.
   RICHLOC, if nonnull, will collect fix-it hints for missing braces.
   Saves the state of any enclosing initializer on initializer_stack
   and resets the global constructor state; matched by finish_init.  */

void
start_init (tree decl, tree asmspec_tree ATTRIBUTE_UNUSED, int top_level,
	    rich_location *richloc)
{
  const char *locus;
  struct initializer_stack *p = XNEW (struct initializer_stack);

  /* Save the current (enclosing) initializer's state.  */
  p->decl = constructor_decl;
  p->require_constant_value = require_constant_value;
  p->require_constant_elements = require_constant_elements;
  p->constructor_stack = constructor_stack;
  p->constructor_range_stack = constructor_range_stack;
  p->elements = constructor_elements;
  p->spelling = spelling;
  p->spelling_base = spelling_base;
  p->spelling_size = spelling_size;
  p->top_level = constructor_top_level;
  p->next = initializer_stack;
  p->missing_brace_richloc = richloc;
  initializer_stack = p;

  constructor_decl = decl;
  constructor_designated = 0;
  constructor_top_level = top_level;

  if (decl != NULL_TREE && decl != error_mark_node)
    {
      /* Static storage requires compile-time-constant initializers.  */
      require_constant_value = TREE_STATIC (decl);
      require_constant_elements
	= ((TREE_STATIC (decl) || (pedantic && !flag_isoc99))
	   /* For a scalar, you can always use any value to initialize,
	      even within braces.  */
	   && AGGREGATE_TYPE_P (TREE_TYPE (decl)));
      locus = identifier_to_locale (IDENTIFIER_POINTER (DECL_NAME (decl)));
    }
  else
    {
      require_constant_value = 0;
      require_constant_elements = 0;
      locus = _("(anonymous)");
    }

  constructor_stack = 0;
  constructor_range_stack = 0;
  found_missing_braces = 0;

  /* Reset the "spelling" used to build diagnostic object names.  */
  spelling_base = 0;
  spelling_size = 0;
  RESTORE_SPELLING_DEPTH (0);

  if (locus)
    push_string (locus);
}
/* Finish reading the initializer started by start_init: free this
   initializer's constructor stack and spelling buffer, then restore
   the state of the enclosing initializer (if any) from
   initializer_stack.  */

void
finish_init (void)
{
  struct initializer_stack *p = initializer_stack;

  /* Free the whole constructor stack of this initializer.  */
  while (constructor_stack)
    {
      struct constructor_stack *q = constructor_stack;
      constructor_stack = q->next;
      free (q);
    }

  /* All range designators must have been consumed by now.  */
  gcc_assert (!constructor_range_stack);

  /* Pop back to the data of the outer initializer (if any).  */
  free (spelling_base);

  constructor_decl = p->decl;
  require_constant_value = p->require_constant_value;
  require_constant_elements = p->require_constant_elements;
  constructor_stack = p->constructor_stack;
  constructor_range_stack = p->constructor_range_stack;
  constructor_elements = p->elements;
  spelling = p->spelling;
  spelling_base = p->spelling_base;
  spelling_size = p->spelling_size;
  constructor_top_level = p->top_level;
  initializer_stack = p->next;
  free (p);
}
/* Call here when we see the initializer is surrounded by braces.
   This is instead of a call to push_init_level;
   it is matched by a call to pop_init_level.

   TYPE is the type to initialize, for a constructor expression.
   For an initializer for a decl, TYPE is zero.  */

void
really_start_incremental_init (tree type)
{
  struct constructor_stack *p = XNEW (struct constructor_stack);

  if (type == NULL_TREE)
    type = TREE_TYPE (constructor_decl);

  if (VECTOR_TYPE_P (type)
      && TYPE_VECTOR_OPAQUE (type))
    error ("opaque vector types cannot be initialized");

  /* Save the current constructor state so the matching pop_init_level
     can restore it.  */
  p->type = constructor_type;
  p->fields = constructor_fields;
  p->index = constructor_index;
  p->max_index = constructor_max_index;
  p->unfilled_index = constructor_unfilled_index;
  p->unfilled_fields = constructor_unfilled_fields;
  p->bit_index = constructor_bit_index;
  p->elements = constructor_elements;
  p->constant = constructor_constant;
  p->simple = constructor_simple;
  p->nonconst = constructor_nonconst;
  p->erroneous = constructor_erroneous;
  p->pending_elts = constructor_pending_elts;
  p->depth = constructor_depth;
  p->replacement_value.value = 0;
  p->replacement_value.original_code = ERROR_MARK;
  p->replacement_value.original_type = NULL;
  p->implicit = 0;
  p->range_stack = 0;
  p->outer = 0;
  p->incremental = constructor_incremental;
  p->designated = constructor_designated;
  p->designator_depth = designator_depth;
  p->next = 0;
  constructor_stack = p;

  /* Start optimistic: elements read later may clear these flags.  */
  constructor_constant = 1;
  constructor_simple = 1;
  constructor_nonconst = 0;
  constructor_depth = SPELLING_DEPTH ();
  constructor_elements = NULL;
  constructor_pending_elts = 0;
  constructor_type = type;
  constructor_incremental = 1;
  constructor_designated = 0;
  constructor_zeroinit = 1;
  designator_depth = 0;
  designator_erroneous = 0;

  if (RECORD_OR_UNION_TYPE_P (constructor_type))
    {
      constructor_fields = TYPE_FIELDS (constructor_type);
      /* Skip any nameless bit fields at the beginning.  */
      while (constructor_fields != NULL_TREE
	     && DECL_UNNAMED_BIT_FIELD (constructor_fields))
	constructor_fields = DECL_CHAIN (constructor_fields);

      constructor_unfilled_fields = constructor_fields;
      constructor_bit_index = bitsize_zero_node;
    }
  else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
    {
      if (TYPE_DOMAIN (constructor_type))
	{
	  constructor_max_index
	    = TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type));

	  /* Detect non-empty initializations of zero-length arrays.  */
	  if (constructor_max_index == NULL_TREE
	      && TYPE_SIZE (constructor_type))
	    constructor_max_index = integer_minus_one_node;

	  /* constructor_max_index needs to be an INTEGER_CST.  Attempts
	     to initialize VLAs will cause a proper error; avoid tree
	     checking errors as well by setting a safe value.  */
	  if (constructor_max_index
	      && TREE_CODE (constructor_max_index) != INTEGER_CST)
	    constructor_max_index = integer_minus_one_node;

	  constructor_index
	    = convert (bitsizetype,
		       TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type)));
	}
      else
	{
	  /* Incomplete array type: start at zero with no upper bound.  */
	  constructor_index = bitsize_zero_node;
	  constructor_max_index = NULL_TREE;
	}

      constructor_unfilled_index = constructor_index;
    }
  else if (gnu_vector_type_p (constructor_type))
    {
      /* Vectors are like simple fixed-size arrays.  */
      constructor_max_index =
	bitsize_int (TYPE_VECTOR_SUBPARTS (constructor_type) - 1);
      constructor_index = bitsize_zero_node;
      constructor_unfilled_index = constructor_index;
    }
  else
    {
      /* Handle the case of int x = {5}; */
      constructor_fields = constructor_type;
      constructor_unfilled_fields = constructor_type;
    }
}
extern location_t last_init_list_comma;

/* Called when we see an open brace for a nested initializer.  Finish
   off any pending levels with implicit braces.  */
void
finish_implicit_inits (location_t loc, struct obstack *braced_init_obstack)
{
  for (;;)
    {
      /* Only implicit levels may be popped here.  */
      if (!constructor_stack->implicit)
	break;

      /* A record/union level is exhausted when no fields remain.  */
      bool record_exhausted = (RECORD_OR_UNION_TYPE_P (constructor_type)
			       && constructor_fields == NULL_TREE);
      /* An array level is exhausted when the index passes the bound.  */
      bool array_exhausted = (TREE_CODE (constructor_type) == ARRAY_TYPE
			      && constructor_max_index
			      && tree_int_cst_lt (constructor_max_index,
						  constructor_index));
      if (!record_exhausted && !array_exhausted)
	break;

      process_init_element (input_location,
			    pop_init_level (loc, 1, braced_init_obstack,
					    last_init_list_comma),
			    true, braced_init_obstack);
    }
}
/* Push down into a subobject, for initialization.
   If this is for an explicit set of braces, IMPLICIT is 0.
   If it is because the next element belongs at a lower level,
   IMPLICIT is 1 (or 2 if the push is because of designator list).
   LOC is the location of the brace (or of the element that forced the
   implicit push); BRACED_INIT_OBSTACK holds pending-element storage.  */

void
push_init_level (location_t loc, int implicit,
		 struct obstack *braced_init_obstack)
{
  struct constructor_stack *p;
  tree value = NULL_TREE;

  /* Unless this is an explicit brace, we need to preserve previous
     content if any.  */
  if (implicit)
    {
      if (RECORD_OR_UNION_TYPE_P (constructor_type) && constructor_fields)
	value = find_init_member (constructor_fields, braced_init_obstack);
      else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
	value = find_init_member (constructor_index, braced_init_obstack);
    }

  /* Save the enclosing level's state on the constructor stack.  */
  p = XNEW (struct constructor_stack);
  p->type = constructor_type;
  p->fields = constructor_fields;
  p->index = constructor_index;
  p->max_index = constructor_max_index;
  p->unfilled_index = constructor_unfilled_index;
  p->unfilled_fields = constructor_unfilled_fields;
  p->bit_index = constructor_bit_index;
  p->elements = constructor_elements;
  p->constant = constructor_constant;
  p->simple = constructor_simple;
  p->nonconst = constructor_nonconst;
  p->erroneous = constructor_erroneous;
  p->pending_elts = constructor_pending_elts;
  p->depth = constructor_depth;
  p->replacement_value.value = NULL_TREE;
  p->replacement_value.original_code = ERROR_MARK;
  p->replacement_value.original_type = NULL;
  p->implicit = implicit;
  p->outer = 0;
  p->incremental = constructor_incremental;
  p->designated = constructor_designated;
  p->designator_depth = designator_depth;
  p->next = constructor_stack;
  p->range_stack = 0;
  constructor_stack = p;

  /* Fresh, optimistic state for the new level.  */
  constructor_constant = 1;
  constructor_simple = 1;
  constructor_nonconst = 0;
  constructor_depth = SPELLING_DEPTH ();
  constructor_elements = NULL;
  constructor_incremental = 1;
  constructor_designated = 0;
  constructor_pending_elts = 0;
  if (!implicit)
    {
      /* Explicit braces start a fresh designator list; remember the
	 enclosing one so pop_init_level can restore it.  */
      p->range_stack = constructor_range_stack;
      constructor_range_stack = 0;
      designator_depth = 0;
      designator_erroneous = 0;
    }

  /* Don't die if an entire brace-pair level is superfluous
     in the containing level.  */
  if (constructor_type == NULL_TREE)
    ;
  else if (RECORD_OR_UNION_TYPE_P (constructor_type))
    {
      /* Don't die if there are extra init elts at the end.  */
      if (constructor_fields == NULL_TREE)
	constructor_type = NULL_TREE;
      else
	{
	  /* Descend into the current field's type.  */
	  constructor_type = TREE_TYPE (constructor_fields);
	  push_member_name (constructor_fields);
	  constructor_depth++;
	}
      /* If upper initializer is designated, then mark this as
	 designated too to prevent bogus warnings.  */
      constructor_designated = p->designated;
    }
  else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
    {
      /* Descend into the array's element type.  */
      constructor_type = TREE_TYPE (constructor_type);
      push_array_bounds (tree_to_uhwi (constructor_index));
      constructor_depth++;
    }

  if (constructor_type == NULL_TREE)
    {
      error_init (loc, "extra brace group at end of initializer");
      constructor_fields = NULL_TREE;
      constructor_unfilled_fields = NULL_TREE;
      return;
    }

  if (value && TREE_CODE (value) == CONSTRUCTOR)
    {
      /* Resume from a previous partial initialization of this
	 subobject.  */
      constructor_constant = TREE_CONSTANT (value);
      constructor_simple = TREE_STATIC (value);
      constructor_nonconst = CONSTRUCTOR_NON_CONST (value);
      constructor_elements = CONSTRUCTOR_ELTS (value);
      if (!vec_safe_is_empty (constructor_elements)
	  && (TREE_CODE (constructor_type) == RECORD_TYPE
	      || TREE_CODE (constructor_type) == ARRAY_TYPE))
	set_nonincremental_init (braced_init_obstack);
    }

  if (implicit == 1)
    {
      /* Record the missing "{" so -Wmissing-braces can suggest it.  */
      found_missing_braces = 1;
      if (initializer_stack->missing_brace_richloc)
	initializer_stack->missing_brace_richloc->add_fixit_insert_before
	  (loc, "{");
    }

  if (RECORD_OR_UNION_TYPE_P (constructor_type))
    {
      constructor_fields = TYPE_FIELDS (constructor_type);
      /* Skip any nameless bit fields at the beginning.  */
      while (constructor_fields != NULL_TREE
	     && DECL_UNNAMED_BIT_FIELD (constructor_fields))
	constructor_fields = DECL_CHAIN (constructor_fields);

      constructor_unfilled_fields = constructor_fields;
      constructor_bit_index = bitsize_zero_node;
    }
  else if (gnu_vector_type_p (constructor_type))
    {
      /* Vectors are like simple fixed-size arrays.  */
      constructor_max_index =
	bitsize_int (TYPE_VECTOR_SUBPARTS (constructor_type) - 1);
      constructor_index = bitsize_int (0);
      constructor_unfilled_index = constructor_index;
    }
  else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
    {
      if (TYPE_DOMAIN (constructor_type))
	{
	  constructor_max_index
	    = TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type));

	  /* Detect non-empty initializations of zero-length arrays.  */
	  if (constructor_max_index == NULL_TREE
	      && TYPE_SIZE (constructor_type))
	    constructor_max_index = integer_minus_one_node;

	  /* constructor_max_index needs to be an INTEGER_CST.  Attempts
	     to initialize VLAs will cause a proper error; avoid tree
	     checking errors as well by setting a safe value.  */
	  if (constructor_max_index
	      && TREE_CODE (constructor_max_index) != INTEGER_CST)
	    constructor_max_index = integer_minus_one_node;

	  constructor_index
	    = convert (bitsizetype,
		       TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type)));
	}
      else
	constructor_index = bitsize_zero_node;

      constructor_unfilled_index = constructor_index;
      if (value && TREE_CODE (value) == STRING_CST)
	{
	  /* We need to split the char/wchar array into individual
	     characters, so that we don't have to special case it
	     everywhere.  */
	  set_nonincremental_init_from_string (value, braced_init_obstack);
	}
    }
  else
    {
      if (constructor_type != error_mark_node)
	warning_init (input_location, 0, "braces around scalar initializer");
      constructor_fields = constructor_type;
      constructor_unfilled_fields = constructor_type;
    }
}
/* At the end of an implicit or explicit brace level,
   finish up that level of constructor.  If a single expression
   with redundant braces initialized that level, return the
   c_expr structure for that expression.  Otherwise, the original_code
   element is set to ERROR_MARK.
   If we were outputting the elements as they are read, return 0 as the value
   from inner levels (process_init_element ignores that),
   but return error_mark_node as the value from the outermost level
   (that's what we want to put in DECL_INITIAL).
   Otherwise, return a CONSTRUCTOR expression as the value.

   LOC is the location of the closing brace (or the element that ends
   the implicit level).  IMPLICIT is nonzero for an implicit level.
   INSERT_BEFORE is where a "}" fix-it hint may be suggested.  */

struct c_expr
pop_init_level (location_t loc, int implicit,
		struct obstack *braced_init_obstack,
		location_t insert_before)
{
  struct constructor_stack *p;
  struct c_expr ret;
  ret.value = NULL_TREE;
  ret.original_code = ERROR_MARK;
  ret.original_type = NULL;

  if (implicit == 0)
    {
      /* When we come to an explicit close brace,
	 pop any inner levels that didn't have explicit braces.  */
      while (constructor_stack->implicit)
	process_init_element (input_location,
			      pop_init_level (loc, 1, braced_init_obstack,
					      insert_before),
			      true, braced_init_obstack);
      gcc_assert (!constructor_range_stack);
    }
  else
    /* Implicit pop: suggest the missing "}" as a fix-it hint.  */
    if (initializer_stack->missing_brace_richloc)
      initializer_stack->missing_brace_richloc->add_fixit_insert_before
	(insert_before, "}");

  /* Now output all pending elements.  */
  constructor_incremental = 1;
  output_pending_init_elements (1, braced_init_obstack);

  p = constructor_stack;

  /* Error for initializing a flexible array member, or a zero-length
     array member in an inappropriate context.  */
  if (constructor_type && constructor_fields
      && TREE_CODE (constructor_type) == ARRAY_TYPE
      && TYPE_DOMAIN (constructor_type)
      && !TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type)))
    {
      /* Silently discard empty initializations.  The parser will
	 already have pedwarned for empty brackets.  */
      if (integer_zerop (constructor_unfilled_index))
	constructor_type = NULL_TREE;
      else
	{
	  gcc_assert (!TYPE_SIZE (constructor_type));

	  if (constructor_depth > 2)
	    error_init (loc, "initialization of flexible array member in a nested context");
	  else
	    pedwarn_init (loc, OPT_Wpedantic,
			  "initialization of a flexible array member");

	  /* We have already issued an error message for the existence
	     of a flexible array member not at the end of the structure.
	     Discard the initializer so that we do not die later.  */
	  if (DECL_CHAIN (constructor_fields) != NULL_TREE)
	    constructor_type = NULL_TREE;
	}
    }

  /* Decide whether this level still qualifies as the universal zero
     initializer { 0 } (or the empty { }).  */
  switch (vec_safe_length (constructor_elements))
    {
    case 0:
      /* Initialization with { } counts as zeroinit.  */
      constructor_zeroinit = 1;
      break;
    case 1:
      /* This might be zeroinit as well.  */
      if (integer_zerop ((*constructor_elements)[0].value))
	constructor_zeroinit = 1;
      break;
    default:
      /* If the constructor has more than one element, it can't be { 0 }.  */
      constructor_zeroinit = 0;
      break;
    }

  /* Warn when some structs are initialized with direct aggregation.  */
  if (!implicit && found_missing_braces && warn_missing_braces
      && !constructor_zeroinit)
    {
      gcc_assert (initializer_stack->missing_brace_richloc);
      warning_at (initializer_stack->missing_brace_richloc,
		  OPT_Wmissing_braces,
		  "missing braces around initializer");
    }

  /* Warn when some struct elements are implicitly initialized to zero.  */
  if (warn_missing_field_initializers
      && constructor_type
      && TREE_CODE (constructor_type) == RECORD_TYPE
      && constructor_unfilled_fields)
    {
      /* Do not warn for flexible array members or zero-length arrays.  */
      while (constructor_unfilled_fields
	     && (!DECL_SIZE (constructor_unfilled_fields)
		 || integer_zerop (DECL_SIZE (constructor_unfilled_fields))))
	constructor_unfilled_fields = DECL_CHAIN (constructor_unfilled_fields);

      if (constructor_unfilled_fields
	  /* Do not warn if this level of the initializer uses member
	     designators; it is likely to be deliberate.  */
	  && !constructor_designated
	  /* Do not warn about initializing with { 0 } or with { }.  */
	  && !constructor_zeroinit)
	{
	  if (warning_at (input_location, OPT_Wmissing_field_initializers,
			  "missing initializer for field %qD of %qT",
			  constructor_unfilled_fields,
			  constructor_type))
	    inform (DECL_SOURCE_LOCATION (constructor_unfilled_fields),
		    "%qD declared here", constructor_unfilled_fields);
	}
    }

  /* Pad out the end of the structure.  */
  if (p->replacement_value.value)
    /* If this closes a superfluous brace pair,
       just pass out the element between them.  */
    ret = p->replacement_value;
  else if (constructor_type == NULL_TREE)
    ;
  else if (!RECORD_OR_UNION_TYPE_P (constructor_type)
	   && TREE_CODE (constructor_type) != ARRAY_TYPE
	   && !gnu_vector_type_p (constructor_type))
    {
      /* A nonincremental scalar initializer--just return
	 the element, after verifying there is just one.  */
      if (vec_safe_is_empty (constructor_elements))
	{
	  if (!constructor_erroneous && constructor_type != error_mark_node)
	    error_init (loc, "empty scalar initializer");
	  ret.value = error_mark_node;
	}
      else if (vec_safe_length (constructor_elements) != 1)
	{
	  error_init (loc, "extra elements in scalar initializer");
	  ret.value = (*constructor_elements)[0].value;
	}
      else
	ret.value = (*constructor_elements)[0].value;
    }
  else
    {
      if (constructor_erroneous)
	ret.value = error_mark_node;
      else
	{
	  /* Build the CONSTRUCTOR node and transfer the accumulated
	     constancy flags onto it.  */
	  ret.value = build_constructor (constructor_type,
					 constructor_elements);
	  if (constructor_constant)
	    TREE_CONSTANT (ret.value) = 1;
	  if (constructor_constant && constructor_simple)
	    TREE_STATIC (ret.value) = 1;
	  if (constructor_nonconst)
	    CONSTRUCTOR_NON_CONST (ret.value) = 1;
	}
    }

  if (ret.value && TREE_CODE (ret.value) != CONSTRUCTOR)
    {
      if (constructor_nonconst)
	ret.original_code = C_MAYBE_CONST_EXPR;
      else if (ret.original_code == C_MAYBE_CONST_EXPR)
	ret.original_code = ERROR_MARK;
    }

  /* Restore the state of the enclosing level.  */
  constructor_type = p->type;
  constructor_fields = p->fields;
  constructor_index = p->index;
  constructor_max_index = p->max_index;
  constructor_unfilled_index = p->unfilled_index;
  constructor_unfilled_fields = p->unfilled_fields;
  constructor_bit_index = p->bit_index;
  constructor_elements = p->elements;
  constructor_constant = p->constant;
  constructor_simple = p->simple;
  constructor_nonconst = p->nonconst;
  constructor_erroneous = p->erroneous;
  constructor_incremental = p->incremental;
  constructor_designated = p->designated;
  designator_depth = p->designator_depth;
  constructor_pending_elts = p->pending_elts;
  constructor_depth = p->depth;
  if (!p->implicit)
    constructor_range_stack = p->range_stack;
  RESTORE_SPELLING_DEPTH (constructor_depth);

  constructor_stack = p->next;
  free (p);

  if (ret.value == NULL_TREE && constructor_stack == 0)
    ret.value = error_mark_node;
  return ret;
}
/* Common handling for both array range and field name designators.
   ARRAY argument is nonzero for array ranges.  Returns false for success.
   On success the constructor state is positioned at the level where the
   designator applies (pushing an implicit level when needed).  */

static bool
set_designator (location_t loc, bool array,
		struct obstack *braced_init_obstack)
{
  tree subtype;
  enum tree_code subcode;

  /* Don't die if an entire brace-pair level is superfluous
     in the containing level, or for an erroneous type.  */
  if (constructor_type == NULL_TREE || constructor_type == error_mark_node)
    return true;

  /* If there were errors in this designator list already, bail out
     silently.  */
  if (designator_erroneous)
    return true;

  /* Likewise for an initializer for a variable-size type.  Those are
     diagnosed in digest_init.  */
  if (COMPLETE_TYPE_P (constructor_type)
      && TREE_CODE (TYPE_SIZE (constructor_type)) != INTEGER_CST)
    return true;

  if (!designator_depth)
    {
      gcc_assert (!constructor_range_stack);

      /* Designator list starts at the level of closest explicit
	 braces.  */
      while (constructor_stack->implicit)
	process_init_element (input_location,
			      pop_init_level (loc, 1, braced_init_obstack,
					      last_init_list_comma),
			      true, braced_init_obstack);
      constructor_designated = 1;
      return false;
    }

  /* A nested designator: determine the subobject type it must apply to.  */
  switch (TREE_CODE (constructor_type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
      subtype = TREE_TYPE (constructor_fields);
      if (subtype != error_mark_node)
	subtype = TYPE_MAIN_VARIANT (subtype);
      break;
    case ARRAY_TYPE:
      subtype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type));
      break;
    default:
      gcc_unreachable ();
    }

  /* Check that the designator kind matches the subobject's type.  */
  subcode = TREE_CODE (subtype);
  if (array && subcode != ARRAY_TYPE)
    {
      error_init (loc, "array index in non-array initializer");
      return true;
    }
  else if (!array && subcode != RECORD_TYPE && subcode != UNION_TYPE)
    {
      error_init (loc, "field name not in record or union initializer");
      return true;
    }

  constructor_designated = 1;
  finish_implicit_inits (loc, braced_init_obstack);
  push_init_level (loc, 2, braced_init_obstack);
  return false;
}
/* If there are range designators in designator list, push a new designator
   to constructor_range_stack.  RANGE_END is end of such stack range or
   NULL_TREE if there is no range designator at this level.  */

static void
push_range_stack (tree range_end, struct obstack * braced_init_obstack)
{
  struct constructor_range_stack *node
    = (struct constructor_range_stack *)
      obstack_alloc (braced_init_obstack,
		     sizeof (struct constructor_range_stack));

  /* Capture the current designator position.  */
  node->fields = constructor_fields;
  node->range_start = constructor_index;
  node->index = constructor_index;
  node->range_end = range_end;
  node->stack = constructor_stack;

  /* Append NODE at the tail of the doubly-linked designator list.  */
  node->next = 0;
  node->prev = constructor_range_stack;
  if (constructor_range_stack)
    constructor_range_stack->next = node;
  constructor_range_stack = node;
}
/* Within an array initializer, specify the next index to be initialized.
   FIRST is that index.  If LAST is nonzero, then initialize a range
   of indices, running from FIRST through LAST.
   LOC is the location of the designator.  Diagnoses non-integer,
   non-constant, negative, and out-of-bounds indices.  */

void
set_init_index (location_t loc, tree first, tree last,
		struct obstack *braced_init_obstack)
{
  if (set_designator (loc, true, braced_init_obstack))
    return;

  /* Assume failure until the designator is fully validated below.  */
  designator_erroneous = 1;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (first))
      || (last && !INTEGRAL_TYPE_P (TREE_TYPE (last))))
    {
      error_init (loc, "array index in initializer not of integer type");
      return;
    }

  if (TREE_CODE (first) != INTEGER_CST)
    {
      /* Try folding; if it folds to a constant, it is merely not an
	 integer constant expression, which only deserves a pedwarn.  */
      first = c_fully_fold (first, false, NULL);
      if (TREE_CODE (first) == INTEGER_CST)
	pedwarn_init (loc, OPT_Wpedantic,
		      "array index in initializer is not "
		      "an integer constant expression");
    }

  if (last && TREE_CODE (last) != INTEGER_CST)
    {
      last = c_fully_fold (last, false, NULL);
      if (TREE_CODE (last) == INTEGER_CST)
	pedwarn_init (loc, OPT_Wpedantic,
		      "array index in initializer is not "
		      "an integer constant expression");
    }

  if (TREE_CODE (first) != INTEGER_CST)
    error_init (loc, "nonconstant array index in initializer");
  else if (last != NULL_TREE && TREE_CODE (last) != INTEGER_CST)
    error_init (loc, "nonconstant array index in initializer");
  else if (TREE_CODE (constructor_type) != ARRAY_TYPE)
    error_init (loc, "array index in non-array initializer");
  else if (tree_int_cst_sgn (first) == -1)
    error_init (loc, "array index in initializer exceeds array bounds");
  else if (constructor_max_index
	   && tree_int_cst_lt (constructor_max_index, first))
    error_init (loc, "array index in initializer exceeds array bounds");
  else
    {
      constant_expression_warning (first);
      if (last)
	constant_expression_warning (last);

      constructor_index = convert (bitsizetype, first);
      /* If the conversion shrank the value, mark it as overflowed.  */
      if (tree_int_cst_lt (constructor_index, first))
	{
	  constructor_index = copy_node (constructor_index);
	  TREE_OVERFLOW (constructor_index) = 1;
	}

      if (last)
	{
	  if (tree_int_cst_equal (first, last))
	    /* A one-element range degenerates to a plain index.  */
	    last = NULL_TREE;
	  else if (tree_int_cst_lt (last, first))
	    {
	      error_init (loc, "empty index range in initializer");
	      last = NULL_TREE;
	    }
	  else
	    {
	      last = convert (bitsizetype, last);
	      if (constructor_max_index != NULL_TREE
		  && tree_int_cst_lt (constructor_max_index, last))
		{
		  error_init (loc, "array index range in initializer exceeds "
			      "array bounds");
		  last = NULL_TREE;
		}
	    }
	}

      designator_depth++;
      designator_erroneous = 0;
      if (constructor_range_stack || last)
	push_range_stack (last, braced_init_obstack);
    }
}
/* Within a struct initializer, specify the next field to be initialized.
   FIELDNAME is the name after the ".", located at FIELDNAME_LOC.
   Diagnoses unknown members, suggesting close matches when possible.  */

void
set_init_label (location_t loc, tree fieldname, location_t fieldname_loc,
		struct obstack *braced_init_obstack)
{
  tree field;

  if (set_designator (loc, false, braced_init_obstack))
    return;

  /* Assume failure until the field is found below.  */
  designator_erroneous = 1;

  if (!RECORD_OR_UNION_TYPE_P (constructor_type))
    {
      error_init (loc, "field name not in record or union initializer");
      return;
    }

  field = lookup_field (constructor_type, fieldname);

  if (field == NULL_TREE)
    {
      /* Unknown member: try a spell-check suggestion.  */
      tree guessed_id = lookup_field_fuzzy (constructor_type, fieldname);
      if (guessed_id)
	{
	  gcc_rich_location rich_loc (fieldname_loc);
	  rich_loc.add_fixit_misspelled_id (fieldname_loc, guessed_id);
	  error_at (&rich_loc,
		    "%qT has no member named %qE; did you mean %qE?",
		    constructor_type, fieldname, guessed_id);
	}
      else
	error_at (fieldname_loc, "%qT has no member named %qE",
		  constructor_type, fieldname);
    }
  else
    /* lookup_field returned a chain of fields (the designator may name
       a member of an anonymous struct/union); descend through each.  */
    do
      {
	constructor_fields = TREE_VALUE (field);
	designator_depth++;
	designator_erroneous = 0;
	if (constructor_range_stack)
	  push_range_stack (NULL_TREE, braced_init_obstack);
	field = TREE_CHAIN (field);
	if (field)
	  {
	    if (set_designator (loc, false, braced_init_obstack))
	      return;
	  }
      }
    while (field != NULL_TREE);
}
/* Add a new initializer to the tree of pending initializers.  PURPOSE
   identifies the initializer, either array index or field in a structure.
   VALUE is the value of that index or field.  If ORIGTYPE is not
   NULL_TREE, it is the original type of VALUE.

   IMPLICIT is true if value comes from pop_init_level (1),
   the new initializer has been merged with the existing one
   and thus no warnings should be emitted about overriding an
   existing initializer.

   The pending elements form an AVL tree keyed on array index (for
   arrays) or bit position (for records); inserting a duplicate key
   overwrites the previous value in place.  */

static void
add_pending_init (location_t loc, tree purpose, tree value, tree origtype,
		  bool implicit, struct obstack *braced_init_obstack)
{
  struct init_node *p, **q, *r;

  q = &constructor_pending_elts;
  p = 0;

  if (TREE_CODE (constructor_type) == ARRAY_TYPE)
    {
      /* Arrays: search by index.  A hit overwrites the old value,
	 warning unless the overwrite came from an implicit merge.  */
      while (*q != 0)
	{
	  p = *q;
	  if (tree_int_cst_lt (purpose, p->purpose))
	    q = &p->left;
	  else if (tree_int_cst_lt (p->purpose, purpose))
	    q = &p->right;
	  else
	    {
	      if (!implicit)
		{
		  if (TREE_SIDE_EFFECTS (p->value))
		    warning_init (loc, OPT_Woverride_init_side_effects,
				  "initialized field with side-effects "
				  "overwritten");
		  else if (warn_override_init)
		    warning_init (loc, OPT_Woverride_init,
				  "initialized field overwritten");
		}
	      p->value = value;
	      p->origtype = origtype;
	      return;
	    }
	}
    }
  else
    {
      /* Records/unions: search by the field's bit position; distinct
	 fields at the same position go into the right subtree.  */
      tree bitpos;

      bitpos = bit_position (purpose);
      while (*q != NULL)
	{
	  p = *q;
	  if (tree_int_cst_lt (bitpos, bit_position (p->purpose)))
	    q = &p->left;
	  else if (p->purpose != purpose)
	    q = &p->right;
	  else
	    {
	      if (!implicit)
		{
		  if (TREE_SIDE_EFFECTS (p->value))
		    warning_init (loc, OPT_Woverride_init_side_effects,
				  "initialized field with side-effects "
				  "overwritten");
		  else if (warn_override_init)
		    warning_init (loc, OPT_Woverride_init,
				  "initialized field overwritten");
		}
	      p->value = value;
	      p->origtype = origtype;
	      return;
	    }
	}
    }

  /* No match: insert a new leaf node at *Q (child of P).  */
  r = (struct init_node *) obstack_alloc (braced_init_obstack,
					  sizeof (struct init_node));
  r->purpose = purpose;
  r->value = value;
  r->origtype = origtype;

  *q = r;
  r->parent = p;
  r->left = 0;
  r->right = 0;
  r->balance = 0;

  /* Walk back up rebalancing (standard AVL insertion fix-up); stop
     at the first node whose height did not change.  */
  while (p)
    {
      struct init_node *s;

      if (r == p->left)
	{
	  if (p->balance == 0)
	    p->balance = -1;
	  else if (p->balance < 0)
	    {
	      if (r->balance < 0)
		{
		  /* L rotation.  */
		  p->left = r->right;
		  if (p->left)
		    p->left->parent = p;
		  r->right = p;

		  p->balance = 0;
		  r->balance = 0;

		  s = p->parent;
		  p->parent = r;
		  r->parent = s;
		  if (s)
		    {
		      if (s->left == p)
			s->left = r;
		      else
			s->right = r;
		    }
		  else
		    constructor_pending_elts = r;
		}
	      else
		{
		  /* LR rotation.  */
		  struct init_node *t = r->right;

		  r->right = t->left;
		  if (r->right)
		    r->right->parent = r;
		  t->left = r;

		  p->left = t->right;
		  if (p->left)
		    p->left->parent = p;
		  t->right = p;

		  p->balance = t->balance < 0;
		  r->balance = -(t->balance > 0);
		  t->balance = 0;

		  s = p->parent;
		  p->parent = t;
		  r->parent = t;
		  t->parent = s;
		  if (s)
		    {
		      if (s->left == p)
			s->left = t;
		      else
			s->right = t;
		    }
		  else
		    constructor_pending_elts = t;
		}
	      break;
	    }
	  else
	    {
	      /* p->balance == +1; growth of left side balances the node.  */
	      p->balance = 0;
	      break;
	    }
	}
      else /* r == p->right */
	{
	  if (p->balance == 0)
	    /* Growth propagation from right side.  */
	    p->balance++;
	  else if (p->balance > 0)
	    {
	      if (r->balance > 0)
		{
		  /* R rotation.  */
		  p->right = r->left;
		  if (p->right)
		    p->right->parent = p;
		  r->left = p;

		  p->balance = 0;
		  r->balance = 0;

		  s = p->parent;
		  p->parent = r;
		  r->parent = s;
		  if (s)
		    {
		      if (s->left == p)
			s->left = r;
		      else
			s->right = r;
		    }
		  else
		    constructor_pending_elts = r;
		}
	      else /* r->balance == -1 */
		{
		  /* RL rotation */
		  struct init_node *t = r->left;

		  r->left = t->right;
		  if (r->left)
		    r->left->parent = r;
		  t->right = r;

		  p->right = t->left;
		  if (p->right)
		    p->right->parent = p;
		  t->left = p;

		  r->balance = (t->balance < 0);
		  p->balance = -(t->balance > 0);
		  t->balance = 0;

		  s = p->parent;
		  p->parent = t;
		  r->parent = t;
		  t->parent = s;
		  if (s)
		    {
		      if (s->left == p)
			s->left = t;
		      else
			s->right = t;
		    }
		  else
		    constructor_pending_elts = t;
		}
	      break;
	    }
	  else
	    {
	      /* p->balance == -1; growth of right side balances the node.  */
	      p->balance = 0;
	      break;
	    }
	}

      r = p;
      p = p->parent;
    }
}
/* Build AVL tree from a sorted chain.

   Switch the current constructor level from incremental (sequential)
   output to pending-tree mode: move every element already collected in
   constructor_elements into the AVL tree via add_pending_init, then
   reset the "unfilled" cursor to the start of the record/array and
   clear constructor_incremental.  Only meaningful for RECORD_TYPE and
   ARRAY_TYPE constructors; other types are left untouched.  */
static void
set_nonincremental_init (struct obstack * braced_init_obstack)
{
  unsigned HOST_WIDE_INT ix;
  tree index, value;

  if (TREE_CODE (constructor_type) != RECORD_TYPE
      && TREE_CODE (constructor_type) != ARRAY_TYPE)
    return;

  /* All already-output elements become pending; they were appended in
     order, so the tree receives them sorted (IMPLICIT=true: no
     override warnings).  */
  FOR_EACH_CONSTRUCTOR_ELT (constructor_elements, ix, index, value)
    add_pending_init (input_location, index, value, NULL_TREE, true,
		      braced_init_obstack);
  constructor_elements = NULL;

  if (TREE_CODE (constructor_type) == RECORD_TYPE)
    {
      constructor_unfilled_fields = TYPE_FIELDS (constructor_type);
      /* Skip any nameless bit fields at the beginning. */
      while (constructor_unfilled_fields != NULL_TREE
	     && DECL_UNNAMED_BIT_FIELD (constructor_unfilled_fields))
	constructor_unfilled_fields = TREE_CHAIN (constructor_unfilled_fields);
    }
  else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
    {
      /* Restart at the array's lower bound; an incomplete domain
	 implies index 0.  */
      if (TYPE_DOMAIN (constructor_type))
	constructor_unfilled_index
	  = convert (bitsizetype,
		     TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type)));
      else
	constructor_unfilled_index = bitsize_zero_node;
    }

  constructor_incremental = 0;
}
/* Build AVL tree from a string constant.

   Expand the string constant STR into one pending initializer per
   element of the current array constructor (which must be an
   ARRAY_TYPE), stopping at the end of the string or at
   constructor_max_index.  Each target character (1 or more bytes) is
   reassembled host-side, sign-extended if the element type is signed,
   and converted to an INTEGER_CST before being queued through
   add_pending_init.  Clears constructor_incremental.  */
static void
set_nonincremental_init_from_string (tree str,
				     struct obstack * braced_init_obstack)
{
  tree value, purpose, type;
  /* VAL holds one target character as a little pair of host words.  */
  HOST_WIDE_INT val[2];
  const char *p, *end;
  int byte, wchar_bytes, charwidth, bitpos;

  gcc_assert (TREE_CODE (constructor_type) == ARRAY_TYPE);

  /* Bytes per string element, and bits per target char.  */
  wchar_bytes = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (str))) / BITS_PER_UNIT;
  charwidth = TYPE_PRECISION (char_type_node);
  gcc_assert ((size_t) wchar_bytes * charwidth
	      <= ARRAY_SIZE (val) * HOST_BITS_PER_WIDE_INT);
  type = TREE_TYPE (constructor_type);
  p = TREE_STRING_POINTER (str);
  end = p + TREE_STRING_LENGTH (str);

  for (purpose = bitsize_zero_node;
       p < end
       && !(constructor_max_index
	    && tree_int_cst_lt (constructor_max_index, purpose));
       purpose = size_binop (PLUS_EXPR, purpose, bitsize_one_node))
    {
      if (wchar_bytes == 1)
	{
	  /* Narrow string: one byte per element.  */
	  val[0] = (unsigned char) *p++;
	  val[1] = 0;
	}
      else
	{
	  /* Wide string: assemble the element byte by byte, honoring
	     the target byte order.  */
	  val[1] = 0;
	  val[0] = 0;
	  for (byte = 0; byte < wchar_bytes; byte++)
	    {
	      if (BYTES_BIG_ENDIAN)
		bitpos = (wchar_bytes - byte - 1) * charwidth;
	      else
		bitpos = byte * charwidth;
	      val[bitpos / HOST_BITS_PER_WIDE_INT]
		|= ((unsigned HOST_WIDE_INT) ((unsigned char) *p++))
		   << (bitpos % HOST_BITS_PER_WIDE_INT);
	    }
	}

      if (!TYPE_UNSIGNED (type))
	{
	  /* Sign-extend from the element's top bit through both host
	     words of VAL.  */
	  bitpos = ((wchar_bytes - 1) * charwidth) + HOST_BITS_PER_CHAR;
	  if (bitpos < HOST_BITS_PER_WIDE_INT)
	    {
	      if (val[0] & (HOST_WIDE_INT_1 << (bitpos - 1)))
		{
		  val[0] |= HOST_WIDE_INT_M1U << bitpos;
		  val[1] = -1;
		}
	    }
	  else if (bitpos == HOST_BITS_PER_WIDE_INT)
	    {
	      if (val[0] < 0)
		val[1] = -1;
	    }
	  else if (val[1] & (HOST_WIDE_INT_1
			     << (bitpos - 1 - HOST_BITS_PER_WIDE_INT)))
	    val[1] |= HOST_WIDE_INT_M1U << (bitpos - HOST_BITS_PER_WIDE_INT);
	}

      /* Convert the raw two-word value into an INTEGER_CST of the
	 element type and queue it at index PURPOSE.  */
      value = wide_int_to_tree (type,
				wide_int::from_array (val, 2,
						      HOST_BITS_PER_WIDE_INT * 2));
      add_pending_init (input_location, purpose, value, NULL_TREE, true,
			braced_init_obstack);
    }

  constructor_incremental = 0;
}
/* Return the value already recorded for FIELD in the pending
   initializer tree (or, for unions, in the current constructor
   elements), or NULL_TREE if FIELD has not been initialized yet.
   May switch the constructor to non-incremental mode first when FIELD
   lies behind the sequentially-output position.  */
static tree
find_init_member (tree field, struct obstack * braced_init_obstack)
{
  struct init_node *node;

  if (TREE_CODE (constructor_type) == ARRAY_TYPE)
    {
      /* Elements before the unfilled index live in constructor_elements
	 while incremental; migrate them to the pending tree so the
	 lookup below can see them.  */
      if (constructor_incremental
	  && tree_int_cst_lt (field, constructor_unfilled_index))
	set_nonincremental_init (braced_init_obstack);

      /* Standard binary search keyed on the integer index.  */
      for (node = constructor_pending_elts; node; )
	{
	  if (tree_int_cst_lt (field, node->purpose))
	    node = node->left;
	  else if (tree_int_cst_lt (node->purpose, field))
	    node = node->right;
	  else
	    return node->value;
	}
    }
  else if (TREE_CODE (constructor_type) == RECORD_TYPE)
    {
      tree bitpos = bit_position (field);

      if (constructor_incremental
	  && (!constructor_unfilled_fields
	      || tree_int_cst_lt (bitpos,
				  bit_position (constructor_unfilled_fields))))
	set_nonincremental_init (braced_init_obstack);

      /* Search keyed on bit position; identity decides among fields
	 sharing a position (zero-sized fields), which sit to the
	 right.  */
      for (node = constructor_pending_elts; node; )
	{
	  if (field == node->purpose)
	    return node->value;
	  if (tree_int_cst_lt (bitpos, bit_position (node->purpose)))
	    node = node->left;
	  else
	    node = node->right;
	}
    }
  else if (TREE_CODE (constructor_type) == UNION_TYPE)
    {
      /* A union keeps at most one element; report it only if it is
	 FIELD's.  */
      if (!vec_safe_is_empty (constructor_elements)
	  && (constructor_elements->last ().index == field))
	return constructor_elements->last ().value;
    }
  return NULL_TREE;
}
/* "Output" the next constructor element.
   At top level, really output it to assembler code now.
   Otherwise, collect it in a list from which we will make a CONSTRUCTOR.
   If ORIGTYPE is not NULL_TREE, it is the original type of VALUE.
   TYPE is the data type that the containing data type wants here.
   FIELD is the field (a FIELD_DECL) or the index that this element fills.
   If VALUE is a string constant, STRICT_STRING is true if it is
   unparenthesized or we should not warn here for it being parenthesized.
   For other types of VALUE, STRICT_STRING is not used.

   PENDING if true means output pending elements that belong
   right after this element.  (PENDING is normally true;
   it is false while outputting pending elements, to avoid recursion.)

   IMPLICIT is true if value comes from pop_init_level (1),
   the new initializer has been merged with the existing one
   and thus no warnings should be emitted about overriding an
   existing initializer.  */
static void
output_init_element (location_t loc, tree value, tree origtype,
		     bool strict_string, tree type, tree field, bool pending,
		     bool implicit, struct obstack * braced_init_obstack)
{
  tree semantic_type = NULL_TREE;
  bool maybe_const = true;
  bool npc;

  if (type == error_mark_node || value == error_mark_node)
    {
      constructor_erroneous = 1;
      return;
    }

  /* Decay an array value to a pointer unless it is a string constant
     initializing an integral array, or its type already matches TYPE.  */
  if (TREE_CODE (TREE_TYPE (value)) == ARRAY_TYPE
      && (TREE_CODE (value) == STRING_CST
	  || TREE_CODE (value) == COMPOUND_LITERAL_EXPR)
      && !(TREE_CODE (value) == STRING_CST
	   && TREE_CODE (type) == ARRAY_TYPE
	   && INTEGRAL_TYPE_P (TREE_TYPE (type)))
      && !comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (value)),
		     TYPE_MAIN_VARIANT (type)))
    value = array_to_pointer_conversion (input_location, value);

  if (TREE_CODE (value) == COMPOUND_LITERAL_EXPR
      && require_constant_value && pending)
    {
      /* As an extension, allow initializing objects with static storage
	 duration with compound literals (which are then treated just as
	 the brace enclosed list they contain).  */
      if (flag_isoc99)
	pedwarn_init (loc, OPT_Wpedantic, "initializer element is not "
		      "constant");
      tree decl = COMPOUND_LITERAL_EXPR_DECL (value);
      value = DECL_INITIAL (decl);
    }

  /* Test for null-pointer-constant before folding/stripping excess
     precision, which could change the answer.  */
  npc = null_pointer_constant_p (value);
  if (TREE_CODE (value) == EXCESS_PRECISION_EXPR)
    {
      semantic_type = TREE_TYPE (value);
      value = TREE_OPERAND (value, 0);
    }
  value = c_fully_fold (value, require_constant_value, &maybe_const);

  /* Fold the constancy of this element into the aggregate-wide flags
     consumed when the CONSTRUCTOR is finished.  */
  if (value == error_mark_node)
    constructor_erroneous = 1;
  else if (!TREE_CONSTANT (value))
    constructor_constant = 0;
  else if (!initializer_constant_valid_p (value,
					  TREE_TYPE (value),
					  AGGREGATE_TYPE_P (constructor_type)
					  && TYPE_REVERSE_STORAGE_ORDER
					     (constructor_type))
	   || (RECORD_OR_UNION_TYPE_P (constructor_type)
	       && DECL_C_BIT_FIELD (field)
	       && TREE_CODE (value) != INTEGER_CST))
    constructor_simple = 0;
  if (!maybe_const)
    constructor_nonconst = 1;

  /* Digest the initializer and issue any errors about incompatible
     types before issuing errors about non-constant initializers. */
  tree new_value = value;
  if (semantic_type)
    new_value = build1 (EXCESS_PRECISION_EXPR, semantic_type, value);
  new_value = digest_init (loc, type, new_value, origtype, npc, strict_string,
			   require_constant_value);
  if (new_value == error_mark_node)
    {
      constructor_erroneous = 1;
      return;
    }
  if (require_constant_value || require_constant_elements)
    constant_expression_warning (new_value);

  /* Proceed to check the constness of the original initializer. */
  if (!initializer_constant_valid_p (value, TREE_TYPE (value)))
    {
      if (require_constant_value)
	{
	  error_init (loc, "initializer element is not constant");
	  value = error_mark_node;
	}
      else if (require_constant_elements)
	pedwarn (loc, OPT_Wpedantic,
		 "initializer element is not computable at load time");
    }
  else if (!maybe_const
	   && (require_constant_value || require_constant_elements))
    pedwarn_init (loc, OPT_Wpedantic,
		  "initializer element is not a constant expression");

  /* Issue -Wc++-compat warnings about initializing a bitfield with
     enum type. */
  if (warn_cxx_compat
      && field != NULL_TREE
      && TREE_CODE (field) == FIELD_DECL
      && DECL_BIT_FIELD_TYPE (field) != NULL_TREE
      && (TYPE_MAIN_VARIANT (DECL_BIT_FIELD_TYPE (field))
	  != TYPE_MAIN_VARIANT (type))
      && TREE_CODE (DECL_BIT_FIELD_TYPE (field)) == ENUMERAL_TYPE)
    {
      tree checktype = origtype != NULL_TREE ? origtype : TREE_TYPE (value);
      if (checktype != error_mark_node
	  && (TYPE_MAIN_VARIANT (checktype)
	      != TYPE_MAIN_VARIANT (DECL_BIT_FIELD_TYPE (field))))
	warning_init (loc, OPT_Wc___compat,
		      "enum conversion in initialization is invalid in C++");
    }

  /* If this field is empty and does not have side effects (and is not at
     the end of structure), don't do anything other than checking the
     initializer. */
  if (field
      && (TREE_TYPE (field) == error_mark_node
	  || (COMPLETE_TYPE_P (TREE_TYPE (field))
	      && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))
	      && !TREE_SIDE_EFFECTS (new_value)
	      && (TREE_CODE (constructor_type) == ARRAY_TYPE
		  || DECL_CHAIN (field)))))
    return;

  /* Finally, set VALUE to the initializer value digested above. */
  value = new_value;

  /* If this element doesn't come next in sequence,
     put it on constructor_pending_elts. */
  if (TREE_CODE (constructor_type) == ARRAY_TYPE
      && (!constructor_incremental
	  || !tree_int_cst_equal (field, constructor_unfilled_index)))
    {
      if (constructor_incremental
	  && tree_int_cst_lt (field, constructor_unfilled_index))
	set_nonincremental_init (braced_init_obstack);

      add_pending_init (loc, field, value, origtype, implicit,
			braced_init_obstack);
      return;
    }
  else if (TREE_CODE (constructor_type) == RECORD_TYPE
	   && (!constructor_incremental
	       || field != constructor_unfilled_fields))
    {
      /* We do this for records but not for unions.  In a union,
	 no matter which field is specified, it can be initialized
	 right away since it starts at the beginning of the union.  */
      if (constructor_incremental)
	{
	  if (!constructor_unfilled_fields)
	    set_nonincremental_init (braced_init_obstack);
	  else
	    {
	      tree bitpos, unfillpos;

	      bitpos = bit_position (field);
	      unfillpos = bit_position (constructor_unfilled_fields);

	      if (tree_int_cst_lt (bitpos, unfillpos))
		set_nonincremental_init (braced_init_obstack);
	    }
	}

      add_pending_init (loc, field, value, origtype, implicit,
			braced_init_obstack);
      return;
    }
  else if (TREE_CODE (constructor_type) == UNION_TYPE
	   && !vec_safe_is_empty (constructor_elements))
    {
      if (!implicit)
	{
	  if (TREE_SIDE_EFFECTS (constructor_elements->last ().value))
	    warning_init (loc, OPT_Woverride_init_side_effects,
			  "initialized field with side-effects overwritten");
	  else if (warn_override_init)
	    warning_init (loc, OPT_Woverride_init,
			  "initialized field overwritten");
	}

      /* We can have just one union field set. */
      constructor_elements = NULL;
    }

  /* Otherwise, output this element either to
     constructor_elements or to the assembler file. */
  constructor_elt celt = {field, value};
  vec_safe_push (constructor_elements, celt);

  /* Advance the variable that indicates sequential elements output. */
  if (TREE_CODE (constructor_type) == ARRAY_TYPE)
    constructor_unfilled_index
      = size_binop_loc (input_location, PLUS_EXPR, constructor_unfilled_index,
			bitsize_one_node);
  else if (TREE_CODE (constructor_type) == RECORD_TYPE)
    {
      constructor_unfilled_fields
	= DECL_CHAIN (constructor_unfilled_fields);

      /* Skip any nameless bit fields. */
      while (constructor_unfilled_fields != NULL_TREE
	     && DECL_UNNAMED_BIT_FIELD (constructor_unfilled_fields))
	constructor_unfilled_fields =
	  DECL_CHAIN (constructor_unfilled_fields);
    }
  else if (TREE_CODE (constructor_type) == UNION_TYPE)
    constructor_unfilled_fields = NULL_TREE;

  /* Now output any pending elements which have become next. */
  if (pending)
    output_pending_init_elements (0, braced_init_obstack);
}
/* For two FIELD_DECLs in the same chain, return -1 if field1
   comes before field2, 1 if field1 comes after field2 and
   0 if field1 == field2.  */
static int
init_field_decl_cmp (tree field1, tree field2)
{
  if (field1 == field2)
    return 0;

  tree bitpos1 = bit_position (field1);
  tree bitpos2 = bit_position (field2);
  if (tree_int_cst_equal (bitpos1, bitpos2))
    {
      /* If one of the fields has non-zero bitsize, then that
	 field must be the last one in a sequence of zero
	 sized fields, fields after it will have bigger
	 bit_position.  Test the field's TYPE_SIZE here, not the type
	 node itself: integer_nonzerop expects an INTEGER_CST
	 expression, so calling it on the type would always yield
	 false and this shortcut would never fire (cf. the
	 integer_zerop (TYPE_SIZE (...)) test in output_init_element).  */
      if (TREE_TYPE (field1) != error_mark_node
	  && COMPLETE_TYPE_P (TREE_TYPE (field1))
	  && integer_nonzerop (TYPE_SIZE (TREE_TYPE (field1))))
	return 1;
      if (TREE_TYPE (field2) != error_mark_node
	  && COMPLETE_TYPE_P (TREE_TYPE (field2))
	  && integer_nonzerop (TYPE_SIZE (TREE_TYPE (field2))))
	return -1;
      /* Otherwise, fallback to DECL_CHAIN walk to find out
	 which field comes earlier.  Walk chains of both
	 fields, so that if field1 and field2 are close to each
	 other in either order, it is found soon even for large
	 sequences of zero sized fields.  */
      tree f1 = field1, f2 = field2;
      while (1)
	{
	  f1 = DECL_CHAIN (f1);
	  f2 = DECL_CHAIN (f2);
	  if (f1 == NULL_TREE)
	    {
	      gcc_assert (f2);
	      return 1;
	    }
	  if (f2 == NULL_TREE)
	    return -1;
	  if (f1 == field2)
	    return -1;
	  if (f2 == field1)
	    return 1;
	  /* A chain that left the shared bit position proves its
	     starting field was the later one.  */
	  if (!tree_int_cst_equal (bit_position (f1), bitpos1))
	    return 1;
	  if (!tree_int_cst_equal (bit_position (f2), bitpos1))
	    return -1;
	}
    }
  else if (tree_int_cst_lt (bitpos1, bitpos2))
    return -1;
  else
    return 1;
}
/* Output any pending elements which have become next.
   As we output elements, constructor_unfilled_{fields,index}
   advances, which may cause other elements to become next;
   if so, they too are output.

   If ALL is 0, we return when there are
   no more pending elements to output now.

   If ALL is 1, we output space as necessary so that
   we can output all the pending elements.  */
static void
output_pending_init_elements (int all, struct obstack * braced_init_obstack)
{
  struct init_node *elt = constructor_pending_elts;
  tree next;

 retry:

  /* Look through the whole pending tree.
     If we find an element that should be output now,
     output it.  Otherwise, set NEXT to the element
     that comes first among those still pending.  */

  next = NULL_TREE;
  while (elt)
    {
      if (TREE_CODE (constructor_type) == ARRAY_TYPE)
	{
	  /* An element at exactly the unfilled index can be emitted;
	     output_init_element (with PENDING=false to avoid
	     recursion) advances constructor_unfilled_index.  */
	  if (tree_int_cst_equal (elt->purpose,
				  constructor_unfilled_index))
	    output_init_element (input_location, elt->value, elt->origtype,
				 true, TREE_TYPE (constructor_type),
				 constructor_unfilled_index, false, false,
				 braced_init_obstack);
	  else if (tree_int_cst_lt (constructor_unfilled_index,
				    elt->purpose))
	    {
	      /* Advance to the next smaller node. */
	      if (elt->left)
		elt = elt->left;
	      else
		{
		  /* We have reached the smallest node bigger than the
		     current unfilled index.  Fill the space first. */
		  next = elt->purpose;
		  break;
		}
	    }
	  else
	    {
	      /* Advance to the next bigger node. */
	      if (elt->right)
		elt = elt->right;
	      else
		{
		  /* We have reached the biggest node in a subtree.  Find
		     the parent of it, which is the next bigger node. */
		  while (elt->parent && elt->parent->right == elt)
		    elt = elt->parent;
		  elt = elt->parent;
		  if (elt && tree_int_cst_lt (constructor_unfilled_index,
					      elt->purpose))
		    {
		      next = elt->purpose;
		      break;
		    }
		}
	    }
	}
      else if (RECORD_OR_UNION_TYPE_P (constructor_type))
	{
	  /* If the current record is complete we are done.  */
	  if (constructor_unfilled_fields == NULL_TREE)
	    break;

	  int cmp = init_field_decl_cmp (constructor_unfilled_fields,
					 elt->purpose);
	  if (cmp == 0)
	    output_init_element (input_location, elt->value, elt->origtype,
				 true, TREE_TYPE (elt->purpose),
				 elt->purpose, false, false,
				 braced_init_obstack);
	  else if (cmp < 0)
	    {
	      /* Advance to the next smaller node. */
	      if (elt->left)
		elt = elt->left;
	      else
		{
		  /* We have reached the smallest node bigger than the
		     current unfilled field.  Fill the space first. */
		  next = elt->purpose;
		  break;
		}
	    }
	  else
	    {
	      /* Advance to the next bigger node. */
	      if (elt->right)
		elt = elt->right;
	      else
		{
		  /* We have reached the biggest node in a subtree.  Find
		     the parent of it, which is the next bigger node. */
		  while (elt->parent && elt->parent->right == elt)
		    elt = elt->parent;
		  elt = elt->parent;
		  if (elt
		      && init_field_decl_cmp (constructor_unfilled_fields,
					      elt->purpose) < 0)
		    {
		      next = elt->purpose;
		      break;
		    }
		}
	    }
	}
    }

  /* Ordinarily return, but not if we want to output all
     and there are elements left.  */
  if (!(all && next != NULL_TREE))
    return;

  /* If it's not incremental, just skip over the gap, so that after
     jumping to retry we will output the next successive element.  */
  if (RECORD_OR_UNION_TYPE_P (constructor_type))
    constructor_unfilled_fields = next;
  else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
    constructor_unfilled_index = next;

  /* ELT now points to the node in the pending tree with the next
     initializer to output.  */
  goto retry;
}
/* Expression VALUE coincides with the start of type TYPE in a braced
   initializer.  Return true if we should treat VALUE as initializing
   the first element of TYPE, false if we should treat it as initializing
   TYPE as a whole.

   If the initializer is clearly invalid, the question becomes:
   which choice gives the best error message?  */
static bool
initialize_elementwise_p (tree type, tree value)
{
  /* Erroneous operands never recurse into elements.  */
  if (type == error_mark_node || value == error_mark_node)
    return false;

  gcc_checking_assert (TYPE_MAIN_VARIANT (type) == type);

  tree value_type = TREE_TYPE (value);
  if (value_type == error_mark_node)
    return false;

  /* GNU vectors can be initialized elementwise, but any vector-typed
     VALUE initializes the vector as a whole: such initializers are
     valid exactly when "TYPE foo = VALUE;" would be (e.g. a V4SI can
     initialize a V8HI under -flax-vector-conversions), so recursing
     into the element type would be confusing at best and wrong at
     worst.  */
  if (gnu_vector_type_p (type))
    return !VECTOR_TYPE_P (value_type);

  /* Other aggregates are entered elementwise unless VALUE already has
     the aggregate's own (main-variant) type; scalars never recurse.  */
  return AGGREGATE_TYPE_P (type) && type != TYPE_MAIN_VARIANT (value_type);
}
/* Add one non-braced element to the current constructor level.
   This adjusts the current position within the constructor's type.
   This may also start or terminate implicit levels
   to handle a partly-braced initializer.

   Once this has found the correct level for the new element,
   it calls output_init_element.

   IMPLICIT is true if value comes from pop_init_level (1),
   the new initializer has been merged with the existing one
   and thus no warnings should be emitted about overriding an
   existing initializer. */
void
process_init_element (location_t loc, struct c_expr value, bool implicit,
		      struct obstack * braced_init_obstack)
{
  /* ORIG_VALUE keeps the unconverted expression so a string constant
     can still initialize a subarray after VALUE has been adjusted.  */
  tree orig_value = value.value;
  int string_flag
    = (orig_value != NULL_TREE && TREE_CODE (orig_value) == STRING_CST);
  bool strict_string = value.original_code == STRING_CST;
  bool was_designated = designator_depth != 0;

  /* A new element consumes any designator that introduced it.  */
  designator_depth = 0;
  designator_erroneous = 0;

  if (!implicit && value.value && !integer_zerop (value.value))
    constructor_zeroinit = 0;

  /* Handle superfluous braces around string cst as in
     char x[] = {"foo"}; */
  if (string_flag
      && constructor_type
      && !was_designated
      && TREE_CODE (constructor_type) == ARRAY_TYPE
      && INTEGRAL_TYPE_P (TREE_TYPE (constructor_type))
      && integer_zerop (constructor_unfilled_index))
    {
      if (constructor_stack->replacement_value.value)
	error_init (loc, "excess elements in %<char%> array initializer");
      constructor_stack->replacement_value = value;
      return;
    }

  if (constructor_stack->replacement_value.value != NULL_TREE)
    {
      error_init (loc, "excess elements in struct initializer");
      return;
    }

  /* Ignore elements of a brace group if it is entirely superfluous
     and has already been diagnosed, or if the type is erroneous. */
  if (constructor_type == NULL_TREE || constructor_type == error_mark_node)
    return;

  /* Ignore elements of an initializer for a variable-size type.
     Those are diagnosed in digest_init. */
  if (COMPLETE_TYPE_P (constructor_type)
      && !poly_int_tree_p (TYPE_SIZE (constructor_type)))
    return;

  if (!implicit && warn_designated_init && !was_designated
      && TREE_CODE (constructor_type) == RECORD_TYPE
      && lookup_attribute ("designated_init",
			   TYPE_ATTRIBUTES (constructor_type)))
    warning_init (loc,
		  OPT_Wdesignated_init,
		  "positional initialization of field "
		  "in %<struct%> declared with %<designated_init%> attribute");

  /* If we've exhausted any levels that didn't have braces,
     pop them now. */
  while (constructor_stack->implicit)
    {
      if (RECORD_OR_UNION_TYPE_P (constructor_type)
	  && constructor_fields == NULL_TREE)
	process_init_element (loc,
			      pop_init_level (loc, 1, braced_init_obstack,
					      last_init_list_comma),
			      true, braced_init_obstack);
      else if ((TREE_CODE (constructor_type) == ARRAY_TYPE
		|| gnu_vector_type_p (constructor_type))
	       && constructor_max_index
	       && tree_int_cst_lt (constructor_max_index,
				   constructor_index))
	process_init_element (loc,
			      pop_init_level (loc, 1, braced_init_obstack,
					      last_init_list_comma),
			      true, braced_init_obstack);
      else
	break;
    }

  /* In the case of [LO ... HI] = VALUE, only evaluate VALUE once. */
  if (constructor_range_stack)
    {
      /* If value is a compound literal and we'll be just using its
	 content, don't put it into a SAVE_EXPR. */
      if (TREE_CODE (value.value) != COMPOUND_LITERAL_EXPR
	  || !require_constant_value)
	{
	  /* Wrap in SAVE_EXPR inside any excess-precision wrapper so
	     the semantic type is preserved.  */
	  tree semantic_type = NULL_TREE;
	  if (TREE_CODE (value.value) == EXCESS_PRECISION_EXPR)
	    {
	      semantic_type = TREE_TYPE (value.value);
	      value.value = TREE_OPERAND (value.value, 0);
	    }
	  value.value = save_expr (value.value);
	  if (semantic_type)
	    value.value = build1 (EXCESS_PRECISION_EXPR, semantic_type,
				  value.value);
	}
    }

  /* Loop until VALUE has been attached at the correct level; the
     'continue's below re-enter after pushing an implicit level or
     after advancing a [LO ... HI] range.  */
  while (1)
    {
      if (TREE_CODE (constructor_type) == RECORD_TYPE)
	{
	  tree fieldtype;
	  enum tree_code fieldcode;

	  if (constructor_fields == NULL_TREE)
	    {
	      pedwarn_init (loc, 0, "excess elements in struct initializer");
	      break;
	    }

	  fieldtype = TREE_TYPE (constructor_fields);
	  if (fieldtype != error_mark_node)
	    fieldtype = TYPE_MAIN_VARIANT (fieldtype);
	  fieldcode = TREE_CODE (fieldtype);

	  /* Error for non-static initialization of a flexible array member. */
	  if (fieldcode == ARRAY_TYPE
	      && !require_constant_value
	      && TYPE_SIZE (fieldtype) == NULL_TREE
	      && DECL_CHAIN (constructor_fields) == NULL_TREE)
	    {
	      error_init (loc, "non-static initialization of a flexible "
			  "array member");
	      break;
	    }

	  /* Error for initialization of a flexible array member with
	     a string constant if the structure is in an array.  E.g.:
	     struct S { int x; char y[]; };
	     struct S s[] = { { 1, "foo" } };
	     is invalid. */
	  if (string_flag
	      && fieldcode == ARRAY_TYPE
	      && constructor_depth > 1
	      && TYPE_SIZE (fieldtype) == NULL_TREE
	      && DECL_CHAIN (constructor_fields) == NULL_TREE)
	    {
	      bool in_array_p = false;
	      for (struct constructor_stack *p = constructor_stack;
		   p && p->type; p = p->next)
		if (TREE_CODE (p->type) == ARRAY_TYPE)
		  {
		    in_array_p = true;
		    break;
		  }
	      if (in_array_p)
		{
		  error_init (loc, "initialization of flexible array "
			      "member in a nested context");
		  break;
		}
	    }

	  /* Accept a string constant to initialize a subarray. */
	  if (value.value != NULL_TREE
	      && fieldcode == ARRAY_TYPE
	      && INTEGRAL_TYPE_P (TREE_TYPE (fieldtype))
	      && string_flag)
	    value.value = orig_value;
	  /* Otherwise, if we have come to a subaggregate,
	     and we don't have an element of its type, push into it. */
	  else if (value.value != NULL_TREE
		   && initialize_elementwise_p (fieldtype, value.value))
	    {
	      push_init_level (loc, 1, braced_init_obstack);
	      continue;
	    }

	  if (value.value)
	    {
	      push_member_name (constructor_fields);
	      output_init_element (loc, value.value, value.original_type,
				   strict_string, fieldtype,
				   constructor_fields, true, implicit,
				   braced_init_obstack);
	      RESTORE_SPELLING_DEPTH (constructor_depth);
	    }
	  else
	    /* Do the bookkeeping for an element that was
	       directly output as a constructor. */
	    {
	      /* For a record, keep track of end position of last field. */
	      if (DECL_SIZE (constructor_fields))
		constructor_bit_index
		  = size_binop_loc (input_location, PLUS_EXPR,
				    bit_position (constructor_fields),
				    DECL_SIZE (constructor_fields));

	      /* If the current field was the first one not yet written out,
		 it isn't now, so update. */
	      if (constructor_unfilled_fields == constructor_fields)
		{
		  constructor_unfilled_fields = DECL_CHAIN (constructor_fields);
		  /* Skip any nameless bit fields. */
		  while (constructor_unfilled_fields != 0
			 && (DECL_UNNAMED_BIT_FIELD
			     (constructor_unfilled_fields)))
		    constructor_unfilled_fields =
		      DECL_CHAIN (constructor_unfilled_fields);
		}
	    }

	  constructor_fields = DECL_CHAIN (constructor_fields);
	  /* Skip any nameless bit fields at the beginning. */
	  while (constructor_fields != NULL_TREE
		 && DECL_UNNAMED_BIT_FIELD (constructor_fields))
	    constructor_fields = DECL_CHAIN (constructor_fields);
	}
      else if (TREE_CODE (constructor_type) == UNION_TYPE)
	{
	  tree fieldtype;
	  enum tree_code fieldcode;

	  if (constructor_fields == NULL_TREE)
	    {
	      pedwarn_init (loc, 0,
			    "excess elements in union initializer");
	      break;
	    }

	  fieldtype = TREE_TYPE (constructor_fields);
	  if (fieldtype != error_mark_node)
	    fieldtype = TYPE_MAIN_VARIANT (fieldtype);
	  fieldcode = TREE_CODE (fieldtype);

	  /* Warn that traditional C rejects initialization of unions.
	     We skip the warning if the value is zero.  This is done
	     under the assumption that the zero initializer in user
	     code appears conditioned on e.g. __STDC__ to avoid
	     "missing initializer" warnings and relies on default
	     initialization to zero in the traditional C case.
	     We also skip the warning if the initializer is designated,
	     again on the assumption that this must be conditional on
	     __STDC__ anyway (and we've already complained about the
	     member-designator already). */
	  if (!in_system_header_at (input_location) && !constructor_designated
	      && !(value.value && (integer_zerop (value.value)
				   || real_zerop (value.value))))
	    warning (OPT_Wtraditional, "traditional C rejects initialization "
		     "of unions");

	  /* Accept a string constant to initialize a subarray. */
	  if (value.value != NULL_TREE
	      && fieldcode == ARRAY_TYPE
	      && INTEGRAL_TYPE_P (TREE_TYPE (fieldtype))
	      && string_flag)
	    value.value = orig_value;
	  /* Otherwise, if we have come to a subaggregate,
	     and we don't have an element of its type, push into it. */
	  else if (value.value != NULL_TREE
		   && initialize_elementwise_p (fieldtype, value.value))
	    {
	      push_init_level (loc, 1, braced_init_obstack);
	      continue;
	    }

	  if (value.value)
	    {
	      push_member_name (constructor_fields);
	      output_init_element (loc, value.value, value.original_type,
				   strict_string, fieldtype,
				   constructor_fields, true, implicit,
				   braced_init_obstack);
	      RESTORE_SPELLING_DEPTH (constructor_depth);
	    }
	  else
	    /* Do the bookkeeping for an element that was
	       directly output as a constructor. */
	    {
	      constructor_bit_index = DECL_SIZE (constructor_fields);
	      constructor_unfilled_fields = DECL_CHAIN (constructor_fields);
	    }

	  /* A union takes a single element; this level is now full.  */
	  constructor_fields = NULL_TREE;
	}
      else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
	{
	  tree elttype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type));
	  enum tree_code eltcode = TREE_CODE (elttype);

	  /* Accept a string constant to initialize a subarray. */
	  if (value.value != NULL_TREE
	      && eltcode == ARRAY_TYPE
	      && INTEGRAL_TYPE_P (TREE_TYPE (elttype))
	      && string_flag)
	    value.value = orig_value;
	  /* Otherwise, if we have come to a subaggregate,
	     and we don't have an element of its type, push into it. */
	  else if (value.value != NULL_TREE
		   && initialize_elementwise_p (elttype, value.value))
	    {
	      push_init_level (loc, 1, braced_init_obstack);
	      continue;
	    }

	  /* An all-ones max index marks a [] array already past its end.  */
	  if (constructor_max_index != NULL_TREE
	      && (tree_int_cst_lt (constructor_max_index, constructor_index)
		  || integer_all_onesp (constructor_max_index)))
	    {
	      pedwarn_init (loc, 0,
			    "excess elements in array initializer");
	      break;
	    }

	  /* Now output the actual element. */
	  if (value.value)
	    {
	      push_array_bounds (tree_to_uhwi (constructor_index));
	      output_init_element (loc, value.value, value.original_type,
				   strict_string, elttype,
				   constructor_index, true, implicit,
				   braced_init_obstack);
	      RESTORE_SPELLING_DEPTH (constructor_depth);
	    }

	  constructor_index
	    = size_binop_loc (input_location, PLUS_EXPR,
			      constructor_index, bitsize_one_node);

	  if (!value.value)
	    /* If we are doing the bookkeeping for an element that was
	       directly output as a constructor, we must update
	       constructor_unfilled_index. */
	    constructor_unfilled_index = constructor_index;
	}
      else if (gnu_vector_type_p (constructor_type))
	{
	  tree elttype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type));

	  /* Do a basic check of initializer size.  Note that vectors
	     always have a fixed size derived from their type. */
	  if (tree_int_cst_lt (constructor_max_index, constructor_index))
	    {
	      pedwarn_init (loc, 0,
			    "excess elements in vector initializer");
	      break;
	    }

	  /* Now output the actual element. */
	  if (value.value)
	    {
	      /* A vector-typed element initializes the whole vector.  */
	      if (TREE_CODE (value.value) == VECTOR_CST)
		elttype = TYPE_MAIN_VARIANT (constructor_type);
	      output_init_element (loc, value.value, value.original_type,
				   strict_string, elttype,
				   constructor_index, true, implicit,
				   braced_init_obstack);
	    }

	  constructor_index
	    = size_binop_loc (input_location,
			      PLUS_EXPR, constructor_index, bitsize_one_node);

	  if (!value.value)
	    /* If we are doing the bookkeeping for an element that was
	       directly output as a constructor, we must update
	       constructor_unfilled_index. */
	    constructor_unfilled_index = constructor_index;
	}

      /* Handle the sole element allowed in a braced initializer
	 for a scalar variable. */
      else if (constructor_type != error_mark_node
	       && constructor_fields == NULL_TREE)
	{
	  pedwarn_init (loc, 0,
			"excess elements in scalar initializer");
	  break;
	}
      else
	{
	  if (value.value)
	    output_init_element (loc, value.value, value.original_type,
				 strict_string, constructor_type,
				 NULL_TREE, true, implicit,
				 braced_init_obstack);
	  constructor_fields = NULL_TREE;
	}

      /* Handle range initializers either at this level or anywhere higher
	 in the designator stack. */
      if (constructor_range_stack)
	{
	  struct constructor_range_stack *p, *range_stack;
	  int finish = 0;

	  range_stack = constructor_range_stack;
	  constructor_range_stack = 0;
	  /* Pop implicit levels down to the one the range applies to.  */
	  while (constructor_stack != range_stack->stack)
	    {
	      gcc_assert (constructor_stack->implicit);
	      process_init_element (loc,
				    pop_init_level (loc, 1,
						    braced_init_obstack,
						    last_init_list_comma),
				    true, braced_init_obstack);
	    }
	  /* Pop every exhausted range (index reached range_end).  */
	  for (p = range_stack;
	       !p->range_end || tree_int_cst_equal (p->index, p->range_end);
	       p = p->prev)
	    {
	      gcc_assert (constructor_stack->implicit);
	      process_init_element (loc,
				    pop_init_level (loc, 1,
						    braced_init_obstack,
						    last_init_list_comma),
				    true, braced_init_obstack);
	    }

	  p->index = size_binop_loc (input_location,
				     PLUS_EXPR, p->index, bitsize_one_node);
	  if (tree_int_cst_equal (p->index, p->range_end) && !p->prev)
	    finish = 1;

	  /* Re-descend, re-pushing implicit levels and resetting inner
	     range indices for the next iteration.  */
	  while (1)
	    {
	      constructor_index = p->index;
	      constructor_fields = p->fields;
	      if (finish && p->range_end && p->index == p->range_start)
		{
		  finish = 0;
		  p->prev = 0;
		}
	      p = p->next;
	      if (!p)
		break;
	      finish_implicit_inits (loc, braced_init_obstack);
	      push_init_level (loc, 2, braced_init_obstack);
	      p->stack = constructor_stack;
	      if (p->range_end && tree_int_cst_equal (p->index, p->range_end))
		p->index = p->range_start;
	    }

	  if (!finish)
	    constructor_range_stack = range_stack;
	  continue;
	}

      break;
    }

  constructor_range_stack = 0;
}
/* Build a complete asm-statement, whose components are a CV_QUALIFIER
   (guaranteed to be 'volatile' or null) and ARGS (represented using
   an ASM_EXPR node).  Mark ARGS volatile when requested, then append
   it to the current statement list and return the result of add_stmt.  */
tree
build_asm_stmt (bool is_volatile, tree args)
{
  if (!is_volatile)
    return add_stmt (args);

  ASM_VOLATILE_P (args) = 1;
  return add_stmt (args);
}
/* Build an asm-expr, whose components are a STRING, some OUTPUTS,
some INPUTS, and some CLOBBERS. The latter three may be NULL.
SIMPLE indicates whether there was anything at all after the
string in the asm expression -- asm("blah") and asm("blah" : )
are subtly different. We use a ASM_EXPR node to represent this.
LOC is the location of the asm, and IS_INLINE says whether this
is asm inline. */
tree
build_asm_expr (location_t loc, tree string, tree outputs, tree inputs,
		tree clobbers, tree labels, bool simple, bool is_inline)
{
  tree tail;
  tree args;
  int i;
  const char *constraint;
  const char **oconstraints;
  bool allows_mem, allows_reg, is_inout;
  int ninputs, noutputs;

  ninputs = list_length (inputs);
  noutputs = list_length (outputs);
  /* Stack-allocated array of output constraint strings; inputs consult
     these when parsing matching constraints below.  */
  oconstraints = (const char **) alloca (noutputs * sizeof (const char *));

  string = resolve_asm_operand_names (string, outputs, inputs, labels);

  /* Remove output conversions that change the type but not the mode.  */
  for (i = 0, tail = outputs; tail; ++i, tail = TREE_CHAIN (tail))
    {
      tree output = TREE_VALUE (tail);

      output = c_fully_fold (output, false, NULL, true);

      /* ??? Really, this should not be here.  Users should be using a
	 proper lvalue, dammit.  But there's a long history of using casts
	 in the output operands.  In cases like longlong.h, this becomes a
	 primitive form of typechecking -- if the cast can be removed, then
	 the output operand had a type of the proper width; otherwise we'll
	 get an error.  Gross, but ...  */
      STRIP_NOPS (output);

      if (!lvalue_or_else (loc, output, lv_asm))
	output = error_mark_node;

      /* Diagnose writing through a read-only output operand, including
	 structs/unions containing const-qualified members.  */
      if (output != error_mark_node
	  && (TREE_READONLY (output)
	      || TYPE_READONLY (TREE_TYPE (output))
	      || (RECORD_OR_UNION_TYPE_P (TREE_TYPE (output))
		  && C_TYPE_FIELDS_READONLY (TREE_TYPE (output)))))
	readonly_error (loc, output, lv_asm);

      constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (tail)));
      oconstraints[i] = constraint;

      if (parse_output_constraint (&constraint, i, ninputs, noutputs,
				   &allows_mem, &allows_reg, &is_inout))
	{
	  /* If the operand is going to end up in memory,
	     mark it addressable.  */
	  if (!allows_reg && !c_mark_addressable (output))
	    output = error_mark_node;
	  if (!(!allows_reg && allows_mem)
	      && output != error_mark_node
	      && VOID_TYPE_P (TREE_TYPE (output)))
	    {
	      error_at (loc, "invalid use of void expression");
	      output = error_mark_node;
	    }
	}
      else
	output = error_mark_node;

      /* Store the (possibly folded or errored) operand back into the list.  */
      TREE_VALUE (tail) = output;
    }

  for (i = 0, tail = inputs; tail; ++i, tail = TREE_CHAIN (tail))
    {
      tree input;

      constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (tail)));
      input = TREE_VALUE (tail);

      if (parse_input_constraint (&constraint, i, ninputs, noutputs, 0,
				  oconstraints, &allows_mem, &allows_reg))
	{
	  /* If the operand is going to end up in memory,
	     mark it addressable.  */
	  if (!allows_reg && allows_mem)
	    {
	      input = c_fully_fold (input, false, NULL, true);

	      /* Strip the nops as we allow this case.  FIXME, this really
		 should be rejected or made deprecated.  */
	      STRIP_NOPS (input);
	      if (!c_mark_addressable (input))
		input = error_mark_node;
	    }
	  else
	    {
	      /* Register-class input: read it as an rvalue.  */
	      struct c_expr expr;
	      memset (&expr, 0, sizeof (expr));
	      expr.value = input;
	      expr = convert_lvalue_to_rvalue (loc, expr, true, false);
	      input = c_fully_fold (expr.value, false, NULL);

	      if (input != error_mark_node && VOID_TYPE_P (TREE_TYPE (input)))
		{
		  error_at (loc, "invalid use of void expression");
		  input = error_mark_node;
		}
	    }
	}
      else
	input = error_mark_node;

      TREE_VALUE (tail) = input;
    }

  /* ASMs with labels cannot have outputs.  This should have been
     enforced by the parser.  */
  gcc_assert (outputs == NULL || labels == NULL);

  args = build_stmt (loc, ASM_EXPR, string, outputs, inputs, clobbers, labels);

  /* asm statements without outputs, including simple ones, are treated
     as volatile.  */
  ASM_INPUT_P (args) = simple;
  ASM_VOLATILE_P (args) = (noutputs == 0);
  ASM_INLINE_P (args) = is_inline;

  return args;
}
/* Generate a goto statement to LABEL. LOC is the location of the
GOTO. */
tree
c_finish_goto_label (location_t loc, tree label)
{
  /* Resolve LABEL to its LABEL_DECL; lookup_label_for_goto diagnoses
     invalid targets and returns NULL on failure.  */
  tree decl = lookup_label_for_goto (loc, label);
  if (!decl)
    return NULL_TREE;
  TREE_USED (decl) = 1;

  /* A goto is predicted not taken.  */
  add_stmt (build_predict_expr (PRED_GOTO, NOT_TAKEN));
  tree stmt = build1 (GOTO_EXPR, void_type_node, decl);
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}
/* Generate a computed goto statement to EXPR. LOC is the location of
the GOTO. */
tree
c_finish_goto_ptr (location_t loc, tree expr)
{
  /* Computed goto is a GNU extension; pedwarn under -Wpedantic.  */
  pedwarn (loc, OPT_Wpedantic, "ISO C forbids %<goto *expr;%>");
  expr = c_fully_fold (expr, false, NULL);
  expr = convert (ptr_type_node, expr);
  tree stmt = build1 (GOTO_EXPR, void_type_node, expr);
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}
/* Generate a C `return' statement. RETVAL is the expression for what
to return, or a null pointer for `return;' with no value. LOC is
the location of the return statement, or the location of the expression,
if the statement has any. If ORIGTYPE is not NULL_TREE, it
is the original type of RETVAL. */
tree
c_finish_return (location_t loc, tree retval, tree origtype)
{
  /* VALTYPE is the declared return type of the current function.  */
  tree valtype = TREE_TYPE (TREE_TYPE (current_function_decl)), ret_stmt;
  bool no_warning = false;
  bool npc = false;

  /* Use the expansion point to handle cases such as returning NULL
     in a function returning void.  */
  location_t xloc = expansion_point_location_if_in_system_header (loc);

  if (TREE_THIS_VOLATILE (current_function_decl))
    warning_at (xloc, 0,
		"function declared %<noreturn%> has a %<return%> statement");

  if (retval)
    {
      tree semantic_type = NULL_TREE;
      npc = null_pointer_constant_p (retval);
      /* Peel off an EXCESS_PRECISION_EXPR wrapper before folding,
	 remembering its semantic type so it can be re-applied below.  */
      if (TREE_CODE (retval) == EXCESS_PRECISION_EXPR)
	{
	  semantic_type = TREE_TYPE (retval);
	  retval = TREE_OPERAND (retval, 0);
	}
      retval = c_fully_fold (retval, false, NULL);
      if (semantic_type
	  && valtype != NULL_TREE
	  && TREE_CODE (valtype) != VOID_TYPE)
	retval = build1 (EXCESS_PRECISION_EXPR, semantic_type, retval);
    }

  if (!retval)
    {
      /* 'return;' with no value.  Warn if the function returns non-void.  */
      current_function_returns_null = 1;
      if ((warn_return_type >= 0 || flag_isoc99)
	  && valtype != NULL_TREE && TREE_CODE (valtype) != VOID_TYPE)
	{
	  bool warned_here;
	  if (flag_isoc99)
	    warned_here = pedwarn
	      (loc, warn_return_type >= 0 ? OPT_Wreturn_type : 0,
	       "%<return%> with no value, in function returning non-void");
	  else
	    warned_here = warning_at
	      (loc, OPT_Wreturn_type,
	       "%<return%> with no value, in function returning non-void");
	  no_warning = true;
	  if (warned_here)
	    inform (DECL_SOURCE_LOCATION (current_function_decl),
		    "declared here");
	}
    }
  else if (valtype == NULL_TREE || TREE_CODE (valtype) == VOID_TYPE)
    {
      /* 'return expr;' in a void function.  */
      current_function_returns_null = 1;
      bool warned_here;
      if (TREE_CODE (TREE_TYPE (retval)) != VOID_TYPE)
	warned_here = pedwarn
	  (xloc, warn_return_type >= 0 ? OPT_Wreturn_type : 0,
	   "%<return%> with a value, in function returning void");
      else
	warned_here = pedwarn
	  (xloc, OPT_Wpedantic, "ISO C forbids "
	   "%<return%> with expression, in function returning void");
      if (warned_here)
	inform (DECL_SOURCE_LOCATION (current_function_decl),
		"declared here");
    }
  else
    {
      /* Normal case: convert the value to the return type and assign it
	 to DECL_RESULT.  */
      tree t = convert_for_assignment (loc, UNKNOWN_LOCATION, valtype,
				       retval, origtype, ic_return,
				       npc, NULL_TREE, NULL_TREE, 0);
      tree res = DECL_RESULT (current_function_decl);
      tree inner;
      bool save;

      current_function_returns_value = 1;
      if (t == error_mark_node)
	return NULL_TREE;

      save = in_late_binary_op;
      if (TREE_CODE (TREE_TYPE (res)) == BOOLEAN_TYPE
	  || TREE_CODE (TREE_TYPE (res)) == COMPLEX_TYPE
	  || (TREE_CODE (TREE_TYPE (t)) == REAL_TYPE
	      && (TREE_CODE (TREE_TYPE (res)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (res)) == ENUMERAL_TYPE)
	      && sanitize_flags_p (SANITIZE_FLOAT_CAST)))
	in_late_binary_op = true;
      inner = t = convert (TREE_TYPE (res), t);
      in_late_binary_op = save;

      /* Strip any conversions, additions, and subtractions, and see if
	 we are returning the address of a local variable.  Warn if so.  */
      while (1)
	{
	  switch (TREE_CODE (inner))
	    {
	    CASE_CONVERT:
	    case NON_LVALUE_EXPR:
	    case PLUS_EXPR:
	    case POINTER_PLUS_EXPR:
	      inner = TREE_OPERAND (inner, 0);
	      continue;

	    case MINUS_EXPR:
	      /* If the second operand of the MINUS_EXPR has a pointer
		 type (or is converted from it), this may be valid, so
		 don't give a warning.  */
	      {
		tree op1 = TREE_OPERAND (inner, 1);

		while (!POINTER_TYPE_P (TREE_TYPE (op1))
		       && (CONVERT_EXPR_P (op1)
			   || TREE_CODE (op1) == NON_LVALUE_EXPR))
		  op1 = TREE_OPERAND (op1, 0);

		if (POINTER_TYPE_P (TREE_TYPE (op1)))
		  break;

		inner = TREE_OPERAND (inner, 0);
		continue;
	      }

	    case ADDR_EXPR:
	      inner = TREE_OPERAND (inner, 0);

	      while (REFERENCE_CLASS_P (inner)
		     && !INDIRECT_REF_P (inner))
		inner = TREE_OPERAND (inner, 0);

	      if (DECL_P (inner)
		  && !DECL_EXTERNAL (inner)
		  && !TREE_STATIC (inner)
		  && DECL_CONTEXT (inner) == current_function_decl
		  && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))))
		{
		  if (TREE_CODE (inner) == LABEL_DECL)
		    warning_at (loc, OPT_Wreturn_local_addr,
				"function returns address of label");
		  else
		    {
		      warning_at (loc, OPT_Wreturn_local_addr,
				  "function returns address of local variable");
		      /* Replace the returned value with zero so callers do
			 not dereference a dangling pointer; keep T for its
			 side effects via a COMPOUND_EXPR.  */
		      tree zero = build_zero_cst (TREE_TYPE (res));
		      t = build2 (COMPOUND_EXPR, TREE_TYPE (res), t, zero);
		    }
		}
	      break;

	    default:
	      break;
	    }

	  break;
	}

      retval = build2 (MODIFY_EXPR, TREE_TYPE (res), res, t);
      SET_EXPR_LOCATION (retval, loc);

      if (warn_sequence_point)
	verify_sequence_points (retval);
    }

  ret_stmt = build_stmt (loc, RETURN_EXPR, retval);
  /* Suppress follow-on warnings when we already warned above.  */
  TREE_NO_WARNING (ret_stmt) |= no_warning;
  return add_stmt (ret_stmt);
}
/* Per-switch bookkeeping, kept on a stack while the body of the switch
   statement is being parsed.  */
struct c_switch {
  /* The SWITCH_EXPR being built.  */
  tree switch_expr;

  /* The original type of the testing expression, i.e. before the
     default conversion is applied.  */
  tree orig_type;

  /* A splay-tree mapping the low element of a case range to the high
     element, or NULL_TREE if there is no high element.  Used to
     determine whether or not a new case label duplicates an old case
     label.  We need a tree, rather than simply a hash table, because
     of the GNU case range extension.  */
  splay_tree cases;

  /* The bindings at the point of the switch.  This is used for
     warnings crossing decls when branching to a case label.  */
  struct c_spot_bindings *bindings;

  /* The next node on the stack.  */
  struct c_switch *next;

  /* Remember whether the controlling expression had boolean type
     before integer promotions for the sake of -Wswitch-bool.  */
  bool bool_cond_p;
};
/* A stack of the currently active switch statements.  The innermost
   switch statement is on the top of the stack.  There is no need to
   mark the stack for garbage collection because it is only active
   during the processing of the body of a function, and we never
   collect at that point.  */

struct c_switch *c_switch_stack;
/* Start a C switch statement, testing expression EXP. Return the new
SWITCH_EXPR. SWITCH_LOC is the location of the `switch'.
SWITCH_COND_LOC is the location of the switch's condition.
EXPLICIT_CAST_P is true if the expression EXP has an explicit cast. */
tree
c_start_case (location_t switch_loc,
	      location_t switch_cond_loc,
	      tree exp, bool explicit_cast_p)
{
  tree orig_type = error_mark_node;
  bool bool_cond_p = false;
  struct c_switch *cs;

  if (exp != error_mark_node)
    {
      orig_type = TREE_TYPE (exp);

      if (!INTEGRAL_TYPE_P (orig_type))
	{
	  /* Non-integer controlling expression: diagnose once and
	     substitute zero so parsing can continue.  */
	  if (orig_type != error_mark_node)
	    {
	      error_at (switch_cond_loc, "switch quantity not an integer");
	      orig_type = error_mark_node;
	    }
	  exp = integer_zero_node;
	}
      else
	{
	  tree type = TYPE_MAIN_VARIANT (orig_type);
	  tree e = exp;

	  /* Warn if the condition has boolean value.  */
	  while (TREE_CODE (e) == COMPOUND_EXPR)
	    e = TREE_OPERAND (e, 1);

	  if ((TREE_CODE (type) == BOOLEAN_TYPE
	       || truth_value_p (TREE_CODE (e)))
	      /* Explicit cast to int suppresses this warning.  */
	      && !(TREE_CODE (type) == INTEGER_TYPE
		   && explicit_cast_p))
	    bool_cond_p = true;

	  if (!in_system_header_at (input_location)
	      && (type == long_integer_type_node
		  || type == long_unsigned_type_node))
	    warning_at (switch_cond_loc,
			OPT_Wtraditional, "%<long%> switch expression not "
			"converted to %<int%> in ISO C");

	  exp = c_fully_fold (exp, false, NULL);
	  exp = default_conversion (exp);

	  if (warn_sequence_point)
	    verify_sequence_points (exp);
	}
    }

  /* Add this new SWITCH_EXPR to the stack.  */
  cs = XNEW (struct c_switch);
  cs->switch_expr = build2 (SWITCH_EXPR, orig_type, exp, NULL_TREE);
  SET_EXPR_LOCATION (cs->switch_expr, switch_loc);
  cs->orig_type = orig_type;
  cs->cases = splay_tree_new (case_compare, NULL, NULL);
  cs->bindings = c_get_switch_bindings ();
  cs->bool_cond_p = bool_cond_p;
  cs->next = c_switch_stack;
  c_switch_stack = cs;

  return add_stmt (cs->switch_expr);
}
/* Process a case label at location LOC. */
tree
do_case (location_t loc, tree low_value, tree high_value)
{
  tree label = NULL_TREE;

  /* C requires integer constant expressions in case labels; fold
     non-constants and pedwarn if they only fold to a constant.  */
  if (low_value && TREE_CODE (low_value) != INTEGER_CST)
    {
      low_value = c_fully_fold (low_value, false, NULL);
      if (TREE_CODE (low_value) == INTEGER_CST)
	pedwarn (loc, OPT_Wpedantic,
		 "case label is not an integer constant expression");
    }

  if (high_value && TREE_CODE (high_value) != INTEGER_CST)
    {
      high_value = c_fully_fold (high_value, false, NULL);
      if (TREE_CODE (high_value) == INTEGER_CST)
	/* Use LOC, not input_location, so this diagnostic points at the
	   case label itself, consistent with the low-bound diagnostic
	   above.  */
	pedwarn (loc, OPT_Wpedantic,
		 "case label is not an integer constant expression");
    }

  /* Case and default labels are only valid inside a switch.  */
  if (c_switch_stack == NULL)
    {
      if (low_value)
	error_at (loc, "case label not within a switch statement");
      else
	error_at (loc, "%<default%> label not within a switch statement");
      return NULL_TREE;
    }

  /* Diagnose jumps into scopes of variably modified types etc.  */
  if (c_check_switch_jump_warnings (c_switch_stack->bindings,
				    EXPR_LOCATION (c_switch_stack->switch_expr),
				    loc))
    return NULL_TREE;

  label = c_add_case_label (loc, c_switch_stack->cases,
			    SWITCH_COND (c_switch_stack->switch_expr),
			    low_value, high_value);
  if (label == error_mark_node)
    label = NULL_TREE;
  return label;
}
/* Finish the switch statement. TYPE is the original type of the
controlling expression of the switch, or NULL_TREE. */
void
c_finish_case (tree body, tree type)
{
  /* Assumes a switch is on the stack; c_start_case always pushes one.  */
  struct c_switch *cs = c_switch_stack;
  location_t switch_location;

  SWITCH_BODY (cs->switch_expr) = body;

  /* Emit warnings as needed.  */
  switch_location = EXPR_LOCATION (cs->switch_expr);
  c_do_switch_warnings (cs->cases, switch_location,
			type ? type : TREE_TYPE (cs->switch_expr),
			SWITCH_COND (cs->switch_expr), cs->bool_cond_p);
  /* Mark the switch exhaustive so gimplification need not add a default.  */
  if (c_switch_covers_all_cases_p (cs->cases, TREE_TYPE (cs->switch_expr)))
    SWITCH_ALL_CASES_P (cs->switch_expr) = 1;

  /* Pop the stack.  */
  c_switch_stack = cs->next;
  splay_tree_delete (cs->cases);
  c_release_switch_bindings (cs->bindings);
  XDELETE (cs);
}
/* Emit an if statement. IF_LOCUS is the location of the 'if'. COND,
THEN_BLOCK and ELSE_BLOCK are expressions to be used; ELSE_BLOCK
may be null. */
void
c_finish_if_stmt (location_t if_locus, tree cond, tree then_block,
		  tree else_block)
{
  /* An if statement is just a void COND_EXPR; ELSE_BLOCK may be null.  */
  tree if_stmt = build3 (COND_EXPR, void_type_node, cond, then_block,
			 else_block);
  SET_EXPR_LOCATION (if_stmt, if_locus);
  add_stmt (if_stmt);
}
/* Emit a general-purpose loop construct. START_LOCUS is the location of
the beginning of the loop. COND is the loop condition. COND_IS_FIRST
is false for DO loops. INCR is the FOR increment expression. BODY is
the statement controlled by the loop. BLAB is the break label. CLAB is
the continue label. Everything is allowed to be NULL.
COND_LOCUS is the location of the loop condition, INCR_LOCUS is the
location of the FOR increment expression. */
void
c_finish_loop (location_t start_locus, location_t cond_locus, tree cond,
	       location_t incr_locus, tree incr, tree body, tree blab,
	       tree clab, bool cond_is_first)
{
  /* ENTRY is an optional label branched to before the first condition
     test; EXIT is the backward branch (possibly wrapped in a COND_EXPR)
     closing the loop.  */
  tree entry = NULL, exit = NULL, t;

  /* If the condition is zero don't generate a loop construct.  */
  if (cond && integer_zerop (cond))
    {
      /* A while(0)/for(;0;) body is unreachable: emit only a jump past
	 the body.  A do-while(0) body still runs once, so emit nothing.  */
      if (cond_is_first)
	{
	  t = build_and_jump (&blab);
	  SET_EXPR_LOCATION (t, start_locus);
	  add_stmt (t);
	}
    }
  else
    {
      tree top = build1 (LABEL_EXPR, void_type_node, NULL_TREE);

      /* If we have an exit condition, then we build an IF with gotos either
	 out of the loop, or to the top of it.  If there's no exit condition,
	 then we just build a jump back to the top.  */
      exit = build_and_jump (&LABEL_EXPR_LABEL (top));

      if (cond && !integer_nonzerop (cond))
	{
	  /* Canonicalize the loop condition to the end.  This means
	     generating a branch to the loop condition.  Reuse the
	     continue label, if possible.  */
	  if (cond_is_first)
	    {
	      if (incr || !clab)
		{
		  entry = build1 (LABEL_EXPR, void_type_node, NULL_TREE);
		  t = build_and_jump (&LABEL_EXPR_LABEL (entry));
		}
	      else
		t = build1 (GOTO_EXPR, void_type_node, clab);
	      SET_EXPR_LOCATION (t, start_locus);
	      add_stmt (t);
	    }

	  t = build_and_jump (&blab);
	  exit = fold_build3_loc (cond_is_first ? start_locus : input_location,
				  COND_EXPR, void_type_node, cond, exit, t);
	}
      else
	{
	  /* For the backward-goto's location of an unconditional loop
	     use the beginning of the body, or, if there is none, the
	     top of the loop.  */
	  location_t loc = EXPR_LOCATION (expr_first (body));
	  if (loc == UNKNOWN_LOCATION)
	    loc = start_locus;
	  SET_EXPR_LOCATION (exit, loc);
	}

      add_stmt (top);
    }

  /* Emission order: body, continue label, increment, entry label,
     condition debug marker, exit branch, break label.  */
  if (body)
    add_stmt (body);
  if (clab)
    add_stmt (build1 (LABEL_EXPR, void_type_node, clab));
  if (incr)
    {
      if (MAY_HAVE_DEBUG_MARKER_STMTS && incr_locus != UNKNOWN_LOCATION)
	{
	  t = build0 (DEBUG_BEGIN_STMT, void_type_node);
	  SET_EXPR_LOCATION (t, incr_locus);
	  add_stmt (t);
	}
      add_stmt (incr);
    }
  if (entry)
    add_stmt (entry);
  if (MAY_HAVE_DEBUG_MARKER_STMTS && cond_locus != UNKNOWN_LOCATION)
    {
      t = build0 (DEBUG_BEGIN_STMT, void_type_node);
      SET_EXPR_LOCATION (t, cond_locus);
      add_stmt (t);
    }
  if (exit)
    add_stmt (exit);
  if (blab)
    add_stmt (build1 (LABEL_EXPR, void_type_node, blab));
}
/* Finish a break or continue statement at LOC.  *LABEL_P is the jump
   target: a LABEL_DECL once created, or an INTEGER_CST sentinel whose
   value encodes why the jump is invalid (0: not in a loop/switch,
   1: OpenMP for loop, 2: #pragma simd loop).  IS_BREAK distinguishes
   break from continue.  Returns the emitted GOTO_EXPR or NULL_TREE.  */
tree
c_finish_bc_stmt (location_t loc, tree *label_p, bool is_break)
{
  bool skip;
  tree label = *label_p;

  /* In switch statements break is sometimes stylistically used after
     a return statement.  This can lead to spurious warnings about
     control reaching the end of a non-void function when it is
     inlined.  Note that we are calling block_may_fallthru with
     language specific tree nodes; this works because
     block_may_fallthru returns true when given something it does not
     understand.  */
  skip = !block_may_fallthru (cur_stmt_list);

  if (!label)
    {
      /* Lazily create the target label, but only if it is reachable.  */
      if (!skip)
	*label_p = label = create_artificial_label (loc);
    }
  else if (TREE_CODE (label) == LABEL_DECL)
    ;
  else switch (TREE_INT_CST_LOW (label))
    {
    case 0:
      if (is_break)
	error_at (loc, "break statement not within loop or switch");
      else
	error_at (loc, "continue statement not within a loop");
      return NULL_TREE;

    case 1:
      gcc_assert (is_break);
      error_at (loc, "break statement used with OpenMP for loop");
      return NULL_TREE;

    case 2:
      if (is_break)
	error ("break statement within %<#pragma simd%> loop body");
      else
	error ("continue statement within %<#pragma simd%> loop body");
      return NULL_TREE;

    default:
      gcc_unreachable ();
    }

  if (skip)
    return NULL_TREE;

  if (!is_break)
    add_stmt (build_predict_expr (PRED_CONTINUE, NOT_TAKEN));

  return add_stmt (build1 (GOTO_EXPR, void_type_node, label));
}
/* A helper routine for c_process_expr_stmt and c_finish_stmt_expr. */
static void
emit_side_effect_warnings (location_t loc, tree expr)
{
  /* Never warn about erroneous expressions.  */
  if (expr == error_mark_node)
    return;

  if (!TREE_SIDE_EFFECTS (expr))
    {
      /* The whole statement computes a value and throws it away.  */
      if (!VOID_TYPE_P (TREE_TYPE (expr)) && !TREE_NO_WARNING (expr))
	warning_at (loc, OPT_Wunused_value, "statement with no effect");
      return;
    }

  if (TREE_CODE (expr) == COMPOUND_EXPR)
    {
      /* Walk down to the right-most operand of the comma chain, tracking
	 the most specific location seen along the way.  */
      tree inner = expr;
      location_t inner_loc = loc;
      while (TREE_CODE (inner) == COMPOUND_EXPR)
	{
	  if (EXPR_HAS_LOCATION (inner))
	    inner_loc = EXPR_LOCATION (inner);
	  inner = TREE_OPERAND (inner, 1);
	}
      if (!TREE_SIDE_EFFECTS (inner)
	  && !VOID_TYPE_P (TREE_TYPE (inner))
	  && !CONVERT_EXPR_P (inner)
	  && !TREE_NO_WARNING (inner)
	  && !TREE_NO_WARNING (expr))
	warning_at (inner_loc, OPT_Wunused_value,
		    "right-hand operand of comma expression has no effect");
      return;
    }

  /* Expressions with side effects get the generic unused-value check.  */
  warn_if_unused_value (expr, loc);
}
/* Process an expression as if it were a complete statement. Emit
diagnostics, but do not call ADD_STMT. LOC is the location of the
statement. */
tree
c_process_expr_stmt (location_t loc, tree expr)
{
  tree exprv;

  if (!expr)
    return NULL_TREE;

  expr = c_fully_fold (expr, false, NULL);

  if (warn_sequence_point)
    verify_sequence_points (expr);

  /* Expression statements require a complete (or void) type, except
     arrays, which may be incomplete here.  */
  if (TREE_TYPE (expr) != error_mark_node
      && !COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (expr))
      && TREE_CODE (TREE_TYPE (expr)) != ARRAY_TYPE)
    error_at (loc, "expression statement has incomplete type");

  /* If we're not processing a statement expression, warn about unused values.
     Warnings for statement expressions will be emitted later, once we figure
     out which is the result.  */
  if (!STATEMENT_LIST_STMT_EXPR (cur_stmt_list)
      && warn_unused_value)
    emit_side_effect_warnings (EXPR_LOC_OR_LOC (expr, loc), expr);

  /* Strip commas and conversions to find the value actually read, and
     mark it read for -Wunused-but-set warnings.  */
  exprv = expr;
  while (TREE_CODE (exprv) == COMPOUND_EXPR)
    exprv = TREE_OPERAND (exprv, 1);
  while (CONVERT_EXPR_P (exprv))
    exprv = TREE_OPERAND (exprv, 0);
  if (DECL_P (exprv)
      || handled_component_p (exprv)
      || TREE_CODE (exprv) == ADDR_EXPR)
    mark_exp_read (exprv);

  /* If the expression is not of a type to which we cannot assign a line
     number, wrap the thing in a no-op NOP_EXPR.  */
  if (DECL_P (expr) || CONSTANT_CLASS_P (expr))
    {
      expr = build1 (NOP_EXPR, TREE_TYPE (expr), expr);
      SET_EXPR_LOCATION (expr, loc);
    }

  return expr;
}
/* Emit an expression as a statement. LOC is the location of the
expression. */
tree
c_finish_expr_stmt (location_t loc, tree expr)
{
  /* A null expression (e.g. an empty statement) emits nothing.  */
  if (!expr)
    return NULL;
  return add_stmt (c_process_expr_stmt (loc, expr));
}
/* Do the opposite and emit a statement as an expression. To begin,
create a new binding level and return it. */
tree
c_begin_stmt_expr (void)
{
  /* We must force a BLOCK for this level so that, if it is not expanded
     later, there is a way to turn off the entire subtree of blocks that
     are contained in it.  */
  keep_next_level ();
  tree ret = c_begin_compound_stmt (true);

  /* Tell the binding machinery which switch (if any) encloses us.  */
  struct c_spot_bindings *switch_bindings = NULL;
  if (c_switch_stack != NULL)
    switch_bindings = c_switch_stack->bindings;
  c_bindings_start_stmt_expr (switch_bindings);

  /* Mark the current statement list as belonging to a statement list.  */
  STATEMENT_LIST_STMT_EXPR (ret) = 1;
  return ret;
}
/* LOC is the location of the compound statement to which this body
belongs. */
tree
c_finish_stmt_expr (location_t loc, tree body)
{
  tree last, type, tmp, val;
  tree *last_p;

  body = c_end_compound_stmt (loc, body, true);

  c_bindings_end_stmt_expr (c_switch_stack == NULL
			    ? NULL
			    : c_switch_stack->bindings);

  /* Locate the last statement in BODY.  See c_end_compound_stmt
     about always returning a BIND_EXPR.  */
  last_p = &BIND_EXPR_BODY (body);
  last = BIND_EXPR_BODY (body);

 continue_searching:
  if (TREE_CODE (last) == STATEMENT_LIST)
    {
      /* Skip trailing debug-marker statements to find the value-producing
	 statement.  */
      tree_stmt_iterator l = tsi_last (last);

      while (!tsi_end_p (l) && TREE_CODE (tsi_stmt (l)) == DEBUG_BEGIN_STMT)
	tsi_prev (&l);

      /* This can happen with degenerate cases like ({ }).  No value.  */
      if (tsi_end_p (l))
	return body;

      /* If we're supposed to generate side effects warnings, process
	 all of the statements except the last.  */
      if (warn_unused_value)
	{
	  for (tree_stmt_iterator i = tsi_start (last);
	       tsi_stmt (i) != tsi_stmt (l); tsi_next (&i))
	    {
	      location_t tloc;
	      tree t = tsi_stmt (i);

	      tloc = EXPR_HAS_LOCATION (t) ? EXPR_LOCATION (t) : loc;
	      emit_side_effect_warnings (tloc, t);
	    }
	}
      last_p = tsi_stmt_ptr (l);
      last = *last_p;
    }

  /* If the end of the list is exception related, then the list was split
     by a call to push_cleanup.  Continue searching.  */
  if (TREE_CODE (last) == TRY_FINALLY_EXPR
      || TREE_CODE (last) == TRY_CATCH_EXPR)
    {
      last_p = &TREE_OPERAND (last, 0);
      last = *last_p;
      goto continue_searching;
    }

  if (last == error_mark_node)
    return last;

  /* In the case that the BIND_EXPR is not necessary, return the
     expression out from inside it.  */
  if ((last == BIND_EXPR_BODY (body)
       /* Skip nested debug stmts.  */
       || last == expr_first (BIND_EXPR_BODY (body)))
      && BIND_EXPR_VARS (body) == NULL)
    {
      /* Even if this looks constant, do not allow it in a constant
	 expression.  */
      last = c_wrap_maybe_const (last, true);
      /* Do not warn if the return value of a statement expression is
	 unused.  */
      TREE_NO_WARNING (last) = 1;
      return last;
    }

  /* Extract the type of said expression.  */
  type = TREE_TYPE (last);

  /* If we're not returning a value at all, then the BIND_EXPR that
     we already have is a fine expression to return.  */
  if (!type || VOID_TYPE_P (type))
    return body;

  /* Now that we've located the expression containing the value, it seems
     silly to make voidify_wrapper_expr repeat the process.  Create a
     temporary of the appropriate type and stick it in a TARGET_EXPR.  */
  tmp = create_tmp_var_raw (type);

  /* Unwrap a no-op NOP_EXPR as added by c_finish_expr_stmt.  This avoids
     tree_expr_nonnegative_p giving up immediately.  */
  val = last;
  if (TREE_CODE (val) == NOP_EXPR
      && TREE_TYPE (val) == TREE_TYPE (TREE_OPERAND (val, 0)))
    val = TREE_OPERAND (val, 0);

  /* Replace the last statement with an assignment into the temporary;
     the TARGET_EXPR below yields that temporary as the value.  */
  *last_p = build2 (MODIFY_EXPR, void_type_node, tmp, val);
  SET_EXPR_LOCATION (*last_p, EXPR_LOCATION (last));

  {
    tree t = build4 (TARGET_EXPR, type, tmp, body, NULL_TREE, NULL_TREE);
    SET_EXPR_LOCATION (t, loc);
    return t;
  }
}
/* Begin and end compound statements. This is as simple as pushing
and popping new statement lists from the tree. */
tree
c_begin_compound_stmt (bool do_scope)
{
  /* Start collecting statements; optionally open a new binding scope
     (DO_SCOPE is false for scopeless statement lists).  */
  tree stmt_list = push_stmt_list ();
  if (do_scope)
    push_scope ();
  return stmt_list;
}
/* End a compound statement. STMT is the statement. LOC is the
location of the compound statement-- this is usually the location
of the opening brace. */
tree
c_end_compound_stmt (location_t loc, tree stmt, bool do_scope)
{
  tree block = NULL;

  if (do_scope)
    {
      if (c_dialect_objc ())
	objc_clear_super_receiver ();
      block = pop_scope ();
    }

  stmt = pop_stmt_list (stmt);
  stmt = c_build_bind_expr (loc, block, stmt);

  /* If this compound statement is nested immediately inside a statement
     expression, then force a BIND_EXPR to be created.  Otherwise we'll
     do the wrong thing for ({ { 1; } }) or ({ 1; { } }).  In particular,
     STATEMENT_LISTs merge, and thus we can lose track of what statement
     was really last.  */
  if (building_stmt_list_p ()
      && STATEMENT_LIST_STMT_EXPR (cur_stmt_list)
      && TREE_CODE (stmt) != BIND_EXPR)
    {
      stmt = build3 (BIND_EXPR, void_type_node, NULL, stmt, NULL);
      TREE_SIDE_EFFECTS (stmt) = 1;
      SET_EXPR_LOCATION (stmt, loc);
    }

  return stmt;
}
/* Queue a cleanup. CLEANUP is an expression/statement to be executed
when the current scope is exited. EH_ONLY is true when this is not
meant to apply to normal control flow transfer. */
void
push_cleanup (tree decl, tree cleanup, bool eh_only)
{
  enum tree_code code;
  tree stmt, list;
  bool stmt_expr;

  code = eh_only ? TRY_CATCH_EXPR : TRY_FINALLY_EXPR;
  stmt = build_stmt (DECL_SOURCE_LOCATION (decl), code, NULL, cleanup);
  /* Emit the TRY node first; its protected body is filled in by the
     statement list pushed below, which captures everything emitted from
     here until the scope ends.  */
  add_stmt (stmt);
  /* Read the stmt-expr flag before pushing the new list, then propagate
     it so c_finish_stmt_expr can still find the result value.  */
  stmt_expr = STATEMENT_LIST_STMT_EXPR (cur_stmt_list);
  list = push_stmt_list ();
  TREE_OPERAND (stmt, 0) = list;
  STATEMENT_LIST_STMT_EXPR (list) = stmt_expr;
}
/* Build a vector comparison of ARG0 and ARG1 using CODE opcode
into a value of TYPE type. Comparison is done via VEC_COND_EXPR. */
static tree
build_vec_cmp (tree_code code, tree type,
	       tree arg0, tree arg1)
{
  /* Each lane of the result is -1 (all bits set) where the comparison
     holds and 0 where it does not.  */
  tree cmp_type = truth_type_for (type);
  tree cmp = build2 (code, cmp_type, arg0, arg1);
  tree all_ones = build_minus_one_cst (type);
  tree all_zeros = build_zero_cst (type);
  return build3 (VEC_COND_EXPR, type, cmp, all_ones, all_zeros);
}
/* Build a binary-operation expression without default conversions.
CODE is the kind of expression to build.
LOCATION is the operator's location.
This function differs from `build' in several ways:
the data type of the result is computed and recorded in it,
warnings are generated if arg data types are invalid,
special handling for addition and subtraction of pointers is known,
and some optimization is done (operations on narrow ints
are done in the narrower type when that gives the same result).
Constant folding is also done before the result is returned.
Note that the operands will never have enumeral types, or function
or array types, because either they will have the default conversions
performed or they have both just been converted to some other type in which
the arithmetic is to be done. */
tree
build_binary_op (location_t location, enum tree_code code,
tree orig_op0, tree orig_op1, bool convert_p)
{
tree type0, type1, orig_type0, orig_type1;
tree eptype;
enum tree_code code0, code1;
tree op0, op1;
tree ret = error_mark_node;
const char *invalid_op_diag;
bool op0_int_operands, op1_int_operands;
bool int_const, int_const_or_overflow, int_operands;
/* Expression code to give to the expression when it is built.
Normally this is CODE, which is what the caller asked for,
but in some special cases we change it. */
enum tree_code resultcode = code;
/* Data type in which the computation is to be performed.
In the simplest cases this is the common type of the arguments. */
tree result_type = NULL;
/* When the computation is in excess precision, the type of the
final EXCESS_PRECISION_EXPR. */
tree semantic_result_type = NULL;
/* Nonzero means operands have already been type-converted
in whatever way is necessary.
Zero means they need to be converted to RESULT_TYPE. */
int converted = 0;
/* Nonzero means create the expression with this type, rather than
RESULT_TYPE. */
tree build_type = NULL_TREE;
/* Nonzero means after finally constructing the expression
convert it to this type. */
tree final_type = NULL_TREE;
/* Nonzero if this is an operation like MIN or MAX which can
safely be computed in short if both args are promoted shorts.
Also implies COMMON.
-1 indicates a bitwise operation; this makes a difference
in the exact conditions for when it is safe to do the operation
in a narrower mode. */
int shorten = 0;
/* Nonzero if this is a comparison operation;
if both args are promoted shorts, compare the original shorts.
Also implies COMMON. */
int short_compare = 0;
/* Nonzero if this is a right-shift operation, which can be computed on the
original short and then promoted if the operand is a promoted short. */
int short_shift = 0;
/* Nonzero means set RESULT_TYPE to the common type of the args. */
int common = 0;
/* True means types are compatible as far as ObjC is concerned. */
bool objc_ok;
/* True means this is an arithmetic operation that may need excess
precision. */
bool may_need_excess_precision;
/* True means this is a boolean operation that converts both its
operands to truth-values. */
bool boolean_op = false;
/* Remember whether we're doing / or %. */
bool doing_div_or_mod = false;
/* Remember whether we're doing << or >>. */
bool doing_shift = false;
/* Tree holding instrumentation expression. */
tree instrument_expr = NULL;
if (location == UNKNOWN_LOCATION)
location = input_location;
op0 = orig_op0;
op1 = orig_op1;
op0_int_operands = EXPR_INT_CONST_OPERANDS (orig_op0);
if (op0_int_operands)
op0 = remove_c_maybe_const_expr (op0);
op1_int_operands = EXPR_INT_CONST_OPERANDS (orig_op1);
if (op1_int_operands)
op1 = remove_c_maybe_const_expr (op1);
int_operands = (op0_int_operands && op1_int_operands);
if (int_operands)
{
int_const_or_overflow = (TREE_CODE (orig_op0) == INTEGER_CST
&& TREE_CODE (orig_op1) == INTEGER_CST);
int_const = (int_const_or_overflow
&& !TREE_OVERFLOW (orig_op0)
&& !TREE_OVERFLOW (orig_op1));
}
else
int_const = int_const_or_overflow = false;
/* Do not apply default conversion in mixed vector/scalar expression. */
if (convert_p
&& VECTOR_TYPE_P (TREE_TYPE (op0)) == VECTOR_TYPE_P (TREE_TYPE (op1)))
{
op0 = default_conversion (op0);
op1 = default_conversion (op1);
}
orig_type0 = type0 = TREE_TYPE (op0);
orig_type1 = type1 = TREE_TYPE (op1);
/* The expression codes of the data types of the arguments tell us
whether the arguments are integers, floating, pointers, etc. */
code0 = TREE_CODE (type0);
code1 = TREE_CODE (type1);
/* Strip NON_LVALUE_EXPRs, etc., since we aren't using as an lvalue. */
STRIP_TYPE_NOPS (op0);
STRIP_TYPE_NOPS (op1);
/* If an error was already reported for one of the arguments,
avoid reporting another error. */
if (code0 == ERROR_MARK || code1 == ERROR_MARK)
return error_mark_node;
if (code0 == POINTER_TYPE
&& reject_gcc_builtin (op0, EXPR_LOCATION (orig_op0)))
return error_mark_node;
if (code1 == POINTER_TYPE
&& reject_gcc_builtin (op1, EXPR_LOCATION (orig_op1)))
return error_mark_node;
if ((invalid_op_diag
= targetm.invalid_binary_op (code, type0, type1)))
{
error_at (location, invalid_op_diag);
return error_mark_node;
}
switch (code)
{
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case EXACT_DIV_EXPR:
may_need_excess_precision = true;
break;
case EQ_EXPR:
case NE_EXPR:
case LE_EXPR:
case GE_EXPR:
case LT_EXPR:
case GT_EXPR:
/* Excess precision for implicit conversions of integers to
floating point in C11 and later. */
may_need_excess_precision = (flag_isoc11
&& (ANY_INTEGRAL_TYPE_P (type0)
|| ANY_INTEGRAL_TYPE_P (type1)));
break;
default:
may_need_excess_precision = false;
break;
}
if (TREE_CODE (op0) == EXCESS_PRECISION_EXPR)
{
op0 = TREE_OPERAND (op0, 0);
type0 = TREE_TYPE (op0);
}
else if (may_need_excess_precision
&& (eptype = excess_precision_type (type0)) != NULL_TREE)
{
type0 = eptype;
op0 = convert (eptype, op0);
}
if (TREE_CODE (op1) == EXCESS_PRECISION_EXPR)
{
op1 = TREE_OPERAND (op1, 0);
type1 = TREE_TYPE (op1);
}
else if (may_need_excess_precision
&& (eptype = excess_precision_type (type1)) != NULL_TREE)
{
type1 = eptype;
op1 = convert (eptype, op1);
}
objc_ok = objc_compare_types (type0, type1, -3, NULL_TREE);
/* In case when one of the operands of the binary operation is
a vector and another is a scalar -- convert scalar to vector. */
if ((gnu_vector_type_p (type0) && code1 != VECTOR_TYPE)
|| (gnu_vector_type_p (type1) && code0 != VECTOR_TYPE))
{
enum stv_conv convert_flag = scalar_to_vector (location, code, op0, op1,
true);
switch (convert_flag)
{
case stv_error:
return error_mark_node;
case stv_firstarg:
{
bool maybe_const = true;
tree sc;
sc = c_fully_fold (op0, false, &maybe_const);
sc = save_expr (sc);
sc = convert (TREE_TYPE (type1), sc);
op0 = build_vector_from_val (type1, sc);
if (!maybe_const)
op0 = c_wrap_maybe_const (op0, true);
orig_type0 = type0 = TREE_TYPE (op0);
code0 = TREE_CODE (type0);
converted = 1;
break;
}
case stv_secondarg:
{
bool maybe_const = true;
tree sc;
sc = c_fully_fold (op1, false, &maybe_const);
sc = save_expr (sc);
sc = convert (TREE_TYPE (type0), sc);
op1 = build_vector_from_val (type0, sc);
if (!maybe_const)
op1 = c_wrap_maybe_const (op1, true);
orig_type1 = type1 = TREE_TYPE (op1);
code1 = TREE_CODE (type1);
converted = 1;
break;
}
default:
break;
}
}
switch (code)
{
case PLUS_EXPR:
/* Handle the pointer + int case. */
if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
ret = pointer_int_sum (location, PLUS_EXPR, op0, op1);
goto return_build_binary_op;
}
else if (code1 == POINTER_TYPE && code0 == INTEGER_TYPE)
{
ret = pointer_int_sum (location, PLUS_EXPR, op1, op0);
goto return_build_binary_op;
}
else
common = 1;
break;
case MINUS_EXPR:
/* Subtraction of two similar pointers.
We must subtract them as integers, then divide by object size. */
if (code0 == POINTER_TYPE && code1 == POINTER_TYPE
&& comp_target_types (location, type0, type1))
{
ret = pointer_diff (location, op0, op1, &instrument_expr);
goto return_build_binary_op;
}
/* Handle pointer minus int. Just like pointer plus int. */
else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
ret = pointer_int_sum (location, MINUS_EXPR, op0, op1);
goto return_build_binary_op;
}
else
common = 1;
break;
case MULT_EXPR:
common = 1;
break;
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case EXACT_DIV_EXPR:
doing_div_or_mod = true;
warn_for_div_by_zero (location, op1);
if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
|| code0 == FIXED_POINT_TYPE
|| code0 == COMPLEX_TYPE
|| gnu_vector_type_p (type0))
&& (code1 == INTEGER_TYPE || code1 == REAL_TYPE
|| code1 == FIXED_POINT_TYPE
|| code1 == COMPLEX_TYPE
|| gnu_vector_type_p (type1)))
{
enum tree_code tcode0 = code0, tcode1 = code1;
if (code0 == COMPLEX_TYPE || code0 == VECTOR_TYPE)
tcode0 = TREE_CODE (TREE_TYPE (TREE_TYPE (op0)));
if (code1 == COMPLEX_TYPE || code1 == VECTOR_TYPE)
tcode1 = TREE_CODE (TREE_TYPE (TREE_TYPE (op1)));
if (!((tcode0 == INTEGER_TYPE && tcode1 == INTEGER_TYPE)
|| (tcode0 == FIXED_POINT_TYPE && tcode1 == FIXED_POINT_TYPE)))
resultcode = RDIV_EXPR;
else
/* Although it would be tempting to shorten always here, that
loses on some targets, since the modulo instruction is
undefined if the quotient can't be represented in the
computation mode. We shorten only if unsigned or if
dividing by something we know != -1. */
shorten = (TYPE_UNSIGNED (TREE_TYPE (orig_op0))
|| (TREE_CODE (op1) == INTEGER_CST
&& !integer_all_onesp (op1)));
common = 1;
}
break;
case BIT_AND_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
shorten = -1;
/* Allow vector types which are not floating point types. */
else if (gnu_vector_type_p (type0)
&& gnu_vector_type_p (type1)
&& !VECTOR_FLOAT_TYPE_P (type0)
&& !VECTOR_FLOAT_TYPE_P (type1))
common = 1;
break;
case TRUNC_MOD_EXPR:
case FLOOR_MOD_EXPR:
doing_div_or_mod = true;
warn_for_div_by_zero (location, op1);
if (gnu_vector_type_p (type0)
&& gnu_vector_type_p (type1)
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE)
common = 1;
else if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
{
/* Although it would be tempting to shorten always here, that loses
on some targets, since the modulo instruction is undefined if the
quotient can't be represented in the computation mode. We shorten
only if unsigned or if dividing by something we know != -1. */
shorten = (TYPE_UNSIGNED (TREE_TYPE (orig_op0))
|| (TREE_CODE (op1) == INTEGER_CST
&& !integer_all_onesp (op1)));
common = 1;
}
break;
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
if ((code0 == INTEGER_TYPE || code0 == POINTER_TYPE
|| code0 == REAL_TYPE || code0 == COMPLEX_TYPE
|| code0 == FIXED_POINT_TYPE)
&& (code1 == INTEGER_TYPE || code1 == POINTER_TYPE
|| code1 == REAL_TYPE || code1 == COMPLEX_TYPE
|| code1 == FIXED_POINT_TYPE))
{
/* Result of these operations is always an int,
but that does not mean the operands should be
converted to ints! */
result_type = integer_type_node;
if (op0_int_operands)
{
op0 = c_objc_common_truthvalue_conversion (location, orig_op0);
op0 = remove_c_maybe_const_expr (op0);
}
else
op0 = c_objc_common_truthvalue_conversion (location, op0);
if (op1_int_operands)
{
op1 = c_objc_common_truthvalue_conversion (location, orig_op1);
op1 = remove_c_maybe_const_expr (op1);
}
else
op1 = c_objc_common_truthvalue_conversion (location, op1);
converted = 1;
boolean_op = true;
}
if (code == TRUTH_ANDIF_EXPR)
{
int_const_or_overflow = (int_operands
&& TREE_CODE (orig_op0) == INTEGER_CST
&& (op0 == truthvalue_false_node
|| TREE_CODE (orig_op1) == INTEGER_CST));
int_const = (int_const_or_overflow
&& !TREE_OVERFLOW (orig_op0)
&& (op0 == truthvalue_false_node
|| !TREE_OVERFLOW (orig_op1)));
}
else if (code == TRUTH_ORIF_EXPR)
{
int_const_or_overflow = (int_operands
&& TREE_CODE (orig_op0) == INTEGER_CST
&& (op0 == truthvalue_true_node
|| TREE_CODE (orig_op1) == INTEGER_CST));
int_const = (int_const_or_overflow
&& !TREE_OVERFLOW (orig_op0)
&& (op0 == truthvalue_true_node
|| !TREE_OVERFLOW (orig_op1)));
}
break;
/* Shift operations: result has same type as first operand;
always convert second operand to int.
Also set SHORT_SHIFT if shifting rightward. */
case RSHIFT_EXPR:
if (gnu_vector_type_p (type0)
&& gnu_vector_type_p (type1)
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
&& known_eq (TYPE_VECTOR_SUBPARTS (type0),
TYPE_VECTOR_SUBPARTS (type1)))
{
result_type = type0;
converted = 1;
}
else if ((code0 == INTEGER_TYPE || code0 == FIXED_POINT_TYPE
|| (gnu_vector_type_p (type0)
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE))
&& code1 == INTEGER_TYPE)
{
doing_shift = true;
if (TREE_CODE (op1) == INTEGER_CST)
{
if (tree_int_cst_sgn (op1) < 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_count_negative,
"right shift count is negative");
}
else if (code0 == VECTOR_TYPE)
{
if (compare_tree_int (op1,
TYPE_PRECISION (TREE_TYPE (type0)))
>= 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_count_overflow,
"right shift count >= width of vector element");
}
}
else
{
if (!integer_zerop (op1))
short_shift = 1;
if (compare_tree_int (op1, TYPE_PRECISION (type0)) >= 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_count_overflow,
"right shift count >= width of type");
}
}
}
/* Use the type of the value to be shifted. */
result_type = type0;
/* Avoid converting op1 to result_type later. */
converted = 1;
}
break;
case LSHIFT_EXPR:
if (gnu_vector_type_p (type0)
&& gnu_vector_type_p (type1)
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
&& known_eq (TYPE_VECTOR_SUBPARTS (type0),
TYPE_VECTOR_SUBPARTS (type1)))
{
result_type = type0;
converted = 1;
}
else if ((code0 == INTEGER_TYPE || code0 == FIXED_POINT_TYPE
|| (gnu_vector_type_p (type0)
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE))
&& code1 == INTEGER_TYPE)
{
doing_shift = true;
if (TREE_CODE (op0) == INTEGER_CST
&& tree_int_cst_sgn (op0) < 0)
{
/* Don't reject a left shift of a negative value in a context
where a constant expression is needed in C90. */
if (flag_isoc99)
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_negative_value,
"left shift of negative value");
}
if (TREE_CODE (op1) == INTEGER_CST)
{
if (tree_int_cst_sgn (op1) < 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_count_negative,
"left shift count is negative");
}
else if (code0 == VECTOR_TYPE)
{
if (compare_tree_int (op1,
TYPE_PRECISION (TREE_TYPE (type0)))
>= 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_count_overflow,
"left shift count >= width of vector element");
}
}
else if (compare_tree_int (op1, TYPE_PRECISION (type0)) >= 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_count_overflow,
"left shift count >= width of type");
}
else if (TREE_CODE (op0) == INTEGER_CST
&& maybe_warn_shift_overflow (location, op0, op1)
&& flag_isoc99)
int_const = false;
}
/* Use the type of the value to be shifted. */
result_type = type0;
/* Avoid converting op1 to result_type later. */
converted = 1;
}
break;
case EQ_EXPR:
case NE_EXPR:
if (gnu_vector_type_p (type0) && gnu_vector_type_p (type1))
{
tree intt;
if (!vector_types_compatible_elements_p (type0, type1))
{
error_at (location, "comparing vectors with different "
"element types");
return error_mark_node;
}
if (maybe_ne (TYPE_VECTOR_SUBPARTS (type0),
TYPE_VECTOR_SUBPARTS (type1)))
{
error_at (location, "comparing vectors with different "
"number of elements");
return error_mark_node;
}
/* It's not precisely specified how the usual arithmetic
conversions apply to the vector types. Here, we use
the unsigned type if one of the operands is signed and
the other one is unsigned. */
if (TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1))
{
if (!TYPE_UNSIGNED (type0))
op0 = build1 (VIEW_CONVERT_EXPR, type1, op0);
else
op1 = build1 (VIEW_CONVERT_EXPR, type0, op1);
warning_at (location, OPT_Wsign_compare, "comparison between "
"types %qT and %qT", type0, type1);
}
/* Always construct signed integer vector type. */
intt = c_common_type_for_size (GET_MODE_BITSIZE
(SCALAR_TYPE_MODE
(TREE_TYPE (type0))), 0);
if (!intt)
{
error_at (location, "could not find an integer type "
"of the same size as %qT",
TREE_TYPE (type0));
return error_mark_node;
}
result_type = build_opaque_vector_type (intt,
TYPE_VECTOR_SUBPARTS (type0));
converted = 1;
ret = build_vec_cmp (resultcode, result_type, op0, op1);
goto return_build_binary_op;
}
if (FLOAT_TYPE_P (type0) || FLOAT_TYPE_P (type1))
warning_at (location,
OPT_Wfloat_equal,
"comparing floating-point with %<==%> or %<!=%> is unsafe");
/* Result of comparison is always int,
but don't convert the args to int! */
build_type = integer_type_node;
if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
|| code0 == FIXED_POINT_TYPE || code0 == COMPLEX_TYPE)
&& (code1 == INTEGER_TYPE || code1 == REAL_TYPE
|| code1 == FIXED_POINT_TYPE || code1 == COMPLEX_TYPE))
short_compare = 1;
else if (code0 == POINTER_TYPE && null_pointer_constant_p (orig_op1))
{
if (TREE_CODE (op0) == ADDR_EXPR
&& decl_with_nonnull_addr_p (TREE_OPERAND (op0, 0))
&& !from_macro_expansion_at (location))
{
if (code == EQ_EXPR)
warning_at (location,
OPT_Waddress,
"the comparison will always evaluate as %<false%> "
"for the address of %qD will never be NULL",
TREE_OPERAND (op0, 0));
else
warning_at (location,
OPT_Waddress,
"the comparison will always evaluate as %<true%> "
"for the address of %qD will never be NULL",
TREE_OPERAND (op0, 0));
}
result_type = type0;
}
else if (code1 == POINTER_TYPE && null_pointer_constant_p (orig_op0))
{
if (TREE_CODE (op1) == ADDR_EXPR
&& decl_with_nonnull_addr_p (TREE_OPERAND (op1, 0))
&& !from_macro_expansion_at (location))
{
if (code == EQ_EXPR)
warning_at (location,
OPT_Waddress,
"the comparison will always evaluate as %<false%> "
"for the address of %qD will never be NULL",
TREE_OPERAND (op1, 0));
else
warning_at (location,
OPT_Waddress,
"the comparison will always evaluate as %<true%> "
"for the address of %qD will never be NULL",
TREE_OPERAND (op1, 0));
}
result_type = type1;
}
else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
{
tree tt0 = TREE_TYPE (type0);
tree tt1 = TREE_TYPE (type1);
addr_space_t as0 = TYPE_ADDR_SPACE (tt0);
addr_space_t as1 = TYPE_ADDR_SPACE (tt1);
addr_space_t as_common = ADDR_SPACE_GENERIC;
/* Anything compares with void *. void * compares with anything.
Otherwise, the targets must be compatible
and both must be object or both incomplete. */
if (comp_target_types (location, type0, type1))
result_type = common_pointer_type (type0, type1);
else if (!addr_space_superset (as0, as1, &as_common))
{
error_at (location, "comparison of pointers to "
"disjoint address spaces");
return error_mark_node;
}
else if (VOID_TYPE_P (tt0) && !TYPE_ATOMIC (tt0))
{
if (pedantic && TREE_CODE (tt1) == FUNCTION_TYPE)
pedwarn (location, OPT_Wpedantic, "ISO C forbids "
"comparison of %<void *%> with function pointer");
}
else if (VOID_TYPE_P (tt1) && !TYPE_ATOMIC (tt1))
{
if (pedantic && TREE_CODE (tt0) == FUNCTION_TYPE)
pedwarn (location, OPT_Wpedantic, "ISO C forbids "
"comparison of %<void *%> with function pointer");
}
else
/* Avoid warning about the volatile ObjC EH puts on decls. */
if (!objc_ok)
pedwarn (location, 0,
"comparison of distinct pointer types lacks a cast");
if (result_type == NULL_TREE)
{
int qual = ENCODE_QUAL_ADDR_SPACE (as_common);
result_type = build_pointer_type
(build_qualified_type (void_type_node, qual));
}
}
else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
result_type = type0;
pedwarn (location, 0, "comparison between pointer and integer");
}
else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE)
{
result_type = type1;
pedwarn (location, 0, "comparison between pointer and integer");
}
if ((TREE_CODE (TREE_TYPE (orig_op0)) == BOOLEAN_TYPE
|| truth_value_p (TREE_CODE (orig_op0)))
^ (TREE_CODE (TREE_TYPE (orig_op1)) == BOOLEAN_TYPE
|| truth_value_p (TREE_CODE (orig_op1))))
maybe_warn_bool_compare (location, code, orig_op0, orig_op1);
break;
case LE_EXPR:
case GE_EXPR:
case LT_EXPR:
case GT_EXPR:
if (gnu_vector_type_p (type0) && gnu_vector_type_p (type1))
{
tree intt;
if (!vector_types_compatible_elements_p (type0, type1))
{
error_at (location, "comparing vectors with different "
"element types");
return error_mark_node;
}
if (maybe_ne (TYPE_VECTOR_SUBPARTS (type0),
TYPE_VECTOR_SUBPARTS (type1)))
{
error_at (location, "comparing vectors with different "
"number of elements");
return error_mark_node;
}
/* It's not precisely specified how the usual arithmetic
conversions apply to the vector types. Here, we use
the unsigned type if one of the operands is signed and
the other one is unsigned. */
if (TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1))
{
if (!TYPE_UNSIGNED (type0))
op0 = build1 (VIEW_CONVERT_EXPR, type1, op0);
else
op1 = build1 (VIEW_CONVERT_EXPR, type0, op1);
warning_at (location, OPT_Wsign_compare, "comparison between "
"types %qT and %qT", type0, type1);
}
/* Always construct signed integer vector type. */
intt = c_common_type_for_size (GET_MODE_BITSIZE
(SCALAR_TYPE_MODE
(TREE_TYPE (type0))), 0);
if (!intt)
{
error_at (location, "could not find an integer type "
"of the same size as %qT",
TREE_TYPE (type0));
return error_mark_node;
}
result_type = build_opaque_vector_type (intt,
TYPE_VECTOR_SUBPARTS (type0));
converted = 1;
ret = build_vec_cmp (resultcode, result_type, op0, op1);
goto return_build_binary_op;
}
build_type = integer_type_node;
if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
|| code0 == FIXED_POINT_TYPE)
&& (code1 == INTEGER_TYPE || code1 == REAL_TYPE
|| code1 == FIXED_POINT_TYPE))
short_compare = 1;
else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
{
addr_space_t as0 = TYPE_ADDR_SPACE (TREE_TYPE (type0));
addr_space_t as1 = TYPE_ADDR_SPACE (TREE_TYPE (type1));
addr_space_t as_common;
if (comp_target_types (location, type0, type1))
{
result_type = common_pointer_type (type0, type1);
if (!COMPLETE_TYPE_P (TREE_TYPE (type0))
!= !COMPLETE_TYPE_P (TREE_TYPE (type1)))
pedwarn (location, 0,
"comparison of complete and incomplete pointers");
else if (TREE_CODE (TREE_TYPE (type0)) == FUNCTION_TYPE)
pedwarn (location, OPT_Wpedantic, "ISO C forbids "
"ordered comparisons of pointers to functions");
else if (null_pointer_constant_p (orig_op0)
|| null_pointer_constant_p (orig_op1))
warning_at (location, OPT_Wextra,
"ordered comparison of pointer with null pointer");
}
else if (!addr_space_superset (as0, as1, &as_common))
{
error_at (location, "comparison of pointers to "
"disjoint address spaces");
return error_mark_node;
}
else
{
int qual = ENCODE_QUAL_ADDR_SPACE (as_common);
result_type = build_pointer_type
(build_qualified_type (void_type_node, qual));
pedwarn (location, 0,
"comparison of distinct pointer types lacks a cast");
}
}
else if (code0 == POINTER_TYPE && null_pointer_constant_p (orig_op1))
{
result_type = type0;
if (pedantic)
pedwarn (location, OPT_Wpedantic,
"ordered comparison of pointer with integer zero");
else if (extra_warnings)
warning_at (location, OPT_Wextra,
"ordered comparison of pointer with integer zero");
}
else if (code1 == POINTER_TYPE && null_pointer_constant_p (orig_op0))
{
result_type = type1;
if (pedantic)
pedwarn (location, OPT_Wpedantic,
"ordered comparison of pointer with integer zero");
else if (extra_warnings)
warning_at (location, OPT_Wextra,
"ordered comparison of pointer with integer zero");
}
else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
result_type = type0;
pedwarn (location, 0, "comparison between pointer and integer");
}
else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE)
{
result_type = type1;
pedwarn (location, 0, "comparison between pointer and integer");
}
if ((code0 == POINTER_TYPE || code1 == POINTER_TYPE)
&& sanitize_flags_p (SANITIZE_POINTER_COMPARE))
{
op0 = save_expr (op0);
op1 = save_expr (op1);
tree tt = builtin_decl_explicit (BUILT_IN_ASAN_POINTER_COMPARE);
instrument_expr = build_call_expr_loc (location, tt, 2, op0, op1);
}
if ((TREE_CODE (TREE_TYPE (orig_op0)) == BOOLEAN_TYPE
|| truth_value_p (TREE_CODE (orig_op0)))
^ (TREE_CODE (TREE_TYPE (orig_op1)) == BOOLEAN_TYPE
|| truth_value_p (TREE_CODE (orig_op1))))
maybe_warn_bool_compare (location, code, orig_op0, orig_op1);
break;
default:
gcc_unreachable ();
}
if (code0 == ERROR_MARK || code1 == ERROR_MARK)
return error_mark_node;
if (gnu_vector_type_p (type0)
&& gnu_vector_type_p (type1)
&& (!tree_int_cst_equal (TYPE_SIZE (type0), TYPE_SIZE (type1))
|| !vector_types_compatible_elements_p (type0, type1)))
{
gcc_rich_location richloc (location);
maybe_range_label_for_tree_type_mismatch
label_for_op0 (orig_op0, orig_op1),
label_for_op1 (orig_op1, orig_op0);
richloc.maybe_add_expr (orig_op0, &label_for_op0);
richloc.maybe_add_expr (orig_op1, &label_for_op1);
binary_op_error (&richloc, code, type0, type1);
return error_mark_node;
}
if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == COMPLEX_TYPE
|| code0 == FIXED_POINT_TYPE
|| gnu_vector_type_p (type0))
&&
(code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE
|| code1 == FIXED_POINT_TYPE
|| gnu_vector_type_p (type1)))
{
bool first_complex = (code0 == COMPLEX_TYPE);
bool second_complex = (code1 == COMPLEX_TYPE);
int none_complex = (!first_complex && !second_complex);
if (shorten || common || short_compare)
{
result_type = c_common_type (type0, type1);
do_warn_double_promotion (result_type, type0, type1,
"implicit conversion from %qT to %qT "
"to match other operand of binary "
"expression",
location);
if (result_type == error_mark_node)
return error_mark_node;
}
if (first_complex != second_complex
&& (code == PLUS_EXPR
|| code == MINUS_EXPR
|| code == MULT_EXPR
|| (code == TRUNC_DIV_EXPR && first_complex))
&& TREE_CODE (TREE_TYPE (result_type)) == REAL_TYPE
&& flag_signed_zeros)
{
/* An operation on mixed real/complex operands must be
handled specially, but the language-independent code can
more easily optimize the plain complex arithmetic if
-fno-signed-zeros. */
tree real_type = TREE_TYPE (result_type);
tree real, imag;
if (type0 != orig_type0 || type1 != orig_type1)
{
gcc_assert (may_need_excess_precision && common);
semantic_result_type = c_common_type (orig_type0, orig_type1);
}
if (first_complex)
{
if (TREE_TYPE (op0) != result_type)
op0 = convert_and_check (location, result_type, op0);
if (TREE_TYPE (op1) != real_type)
op1 = convert_and_check (location, real_type, op1);
}
else
{
if (TREE_TYPE (op0) != real_type)
op0 = convert_and_check (location, real_type, op0);
if (TREE_TYPE (op1) != result_type)
op1 = convert_and_check (location, result_type, op1);
}
if (TREE_CODE (op0) == ERROR_MARK || TREE_CODE (op1) == ERROR_MARK)
return error_mark_node;
if (first_complex)
{
op0 = save_expr (op0);
real = build_unary_op (EXPR_LOCATION (orig_op0), REALPART_EXPR,
op0, true);
imag = build_unary_op (EXPR_LOCATION (orig_op0), IMAGPART_EXPR,
op0, true);
switch (code)
{
case MULT_EXPR:
case TRUNC_DIV_EXPR:
op1 = save_expr (op1);
imag = build2 (resultcode, real_type, imag, op1);
/* Fall through. */
case PLUS_EXPR:
case MINUS_EXPR:
real = build2 (resultcode, real_type, real, op1);
break;
default:
gcc_unreachable();
}
}
else
{
op1 = save_expr (op1);
real = build_unary_op (EXPR_LOCATION (orig_op1), REALPART_EXPR,
op1, true);
imag = build_unary_op (EXPR_LOCATION (orig_op1), IMAGPART_EXPR,
op1, true);
switch (code)
{
case MULT_EXPR:
op0 = save_expr (op0);
imag = build2 (resultcode, real_type, op0, imag);
/* Fall through. */
case PLUS_EXPR:
real = build2 (resultcode, real_type, op0, real);
break;
case MINUS_EXPR:
real = build2 (resultcode, real_type, op0, real);
imag = build1 (NEGATE_EXPR, real_type, imag);
break;
default:
gcc_unreachable();
}
}
ret = build2 (COMPLEX_EXPR, result_type, real, imag);
goto return_build_binary_op;
}
/* For certain operations (which identify themselves by shorten != 0)
if both args were extended from the same smaller type,
do the arithmetic in that type and then extend.
shorten !=0 and !=1 indicates a bitwise operation.
For them, this optimization is safe only if
both args are zero-extended or both are sign-extended.
Otherwise, we might change the result.
Eg, (short)-1 | (unsigned short)-1 is (int)-1
but calculated in (unsigned short) it would be (unsigned short)-1. */
if (shorten && none_complex)
{
final_type = result_type;
result_type = shorten_binary_op (result_type, op0, op1,
shorten == -1);
}
/* Shifts can be shortened if shifting right. */
if (short_shift)
{
int unsigned_arg;
tree arg0 = get_narrower (op0, &unsigned_arg);
final_type = result_type;
if (arg0 == op0 && final_type == TREE_TYPE (op0))
unsigned_arg = TYPE_UNSIGNED (TREE_TYPE (op0));
if (TYPE_PRECISION (TREE_TYPE (arg0)) < TYPE_PRECISION (result_type)
&& tree_int_cst_sgn (op1) > 0
/* We can shorten only if the shift count is less than the
number of bits in the smaller type size. */
&& compare_tree_int (op1, TYPE_PRECISION (TREE_TYPE (arg0))) < 0
/* We cannot drop an unsigned shift after sign-extension. */
&& (!TYPE_UNSIGNED (final_type) || unsigned_arg))
{
/* Do an unsigned shift if the operand was zero-extended. */
result_type
= c_common_signed_or_unsigned_type (unsigned_arg,
TREE_TYPE (arg0));
/* Convert value-to-be-shifted to that type. */
if (TREE_TYPE (op0) != result_type)
op0 = convert (result_type, op0);
converted = 1;
}
}
/* Comparison operations are shortened too but differently.
They identify themselves by setting short_compare = 1. */
if (short_compare)
{
/* Don't write &op0, etc., because that would prevent op0
from being kept in a register.
Instead, make copies of the our local variables and
pass the copies by reference, then copy them back afterward. */
tree xop0 = op0, xop1 = op1, xresult_type = result_type;
enum tree_code xresultcode = resultcode;
tree val
= shorten_compare (location, &xop0, &xop1, &xresult_type,
&xresultcode);
if (val != NULL_TREE)
{
ret = val;
goto return_build_binary_op;
}
op0 = xop0, op1 = xop1;
converted = 1;
resultcode = xresultcode;
if (c_inhibit_evaluation_warnings == 0)
{
bool op0_maybe_const = true;
bool op1_maybe_const = true;
tree orig_op0_folded, orig_op1_folded;
if (in_late_binary_op)
{
orig_op0_folded = orig_op0;
orig_op1_folded = orig_op1;
}
else
{
/* Fold for the sake of possible warnings, as in
build_conditional_expr. This requires the
"original" values to be folded, not just op0 and
op1. */
c_inhibit_evaluation_warnings++;
op0 = c_fully_fold (op0, require_constant_value,
&op0_maybe_const);
op1 = c_fully_fold (op1, require_constant_value,
&op1_maybe_const);
c_inhibit_evaluation_warnings--;
orig_op0_folded = c_fully_fold (orig_op0,
require_constant_value,
NULL);
orig_op1_folded = c_fully_fold (orig_op1,
require_constant_value,
NULL);
}
if (warn_sign_compare)
warn_for_sign_compare (location, orig_op0_folded,
orig_op1_folded, op0, op1,
result_type, resultcode);
if (!in_late_binary_op && !int_operands)
{
if (!op0_maybe_const || TREE_CODE (op0) != INTEGER_CST)
op0 = c_wrap_maybe_const (op0, !op0_maybe_const);
if (!op1_maybe_const || TREE_CODE (op1) != INTEGER_CST)
op1 = c_wrap_maybe_const (op1, !op1_maybe_const);
}
}
}
}
/* At this point, RESULT_TYPE must be nonzero to avoid an error message.
If CONVERTED is zero, both args will be converted to type RESULT_TYPE.
Then the expression will be built.
It will be given type FINAL_TYPE if that is nonzero;
otherwise, it will be given type RESULT_TYPE. */
if (!result_type)
{
/* Favor showing any expression locations that are available. */
op_location_t oploc (location, UNKNOWN_LOCATION);
binary_op_rich_location richloc (oploc, orig_op0, orig_op1, true);
binary_op_error (&richloc, code, TREE_TYPE (op0), TREE_TYPE (op1));
return error_mark_node;
}
if (build_type == NULL_TREE)
{
build_type = result_type;
if ((type0 != orig_type0 || type1 != orig_type1)
&& !boolean_op)
{
gcc_assert (may_need_excess_precision && common);
semantic_result_type = c_common_type (orig_type0, orig_type1);
}
}
if (!converted)
{
op0 = ep_convert_and_check (location, result_type, op0,
semantic_result_type);
op1 = ep_convert_and_check (location, result_type, op1,
semantic_result_type);
/* This can happen if one operand has a vector type, and the other
has a different type. */
if (TREE_CODE (op0) == ERROR_MARK || TREE_CODE (op1) == ERROR_MARK)
return error_mark_node;
}
if (sanitize_flags_p ((SANITIZE_SHIFT
| SANITIZE_DIVIDE | SANITIZE_FLOAT_DIVIDE))
&& current_function_decl != NULL_TREE
&& (doing_div_or_mod || doing_shift)
&& !require_constant_value)
{
/* OP0 and/or OP1 might have side-effects. */
op0 = save_expr (op0);
op1 = save_expr (op1);
op0 = c_fully_fold (op0, false, NULL);
op1 = c_fully_fold (op1, false, NULL);
if (doing_div_or_mod && (sanitize_flags_p ((SANITIZE_DIVIDE
| SANITIZE_FLOAT_DIVIDE))))
instrument_expr = ubsan_instrument_division (location, op0, op1);
else if (doing_shift && sanitize_flags_p (SANITIZE_SHIFT))
instrument_expr = ubsan_instrument_shift (location, code, op0, op1);
}
/* Treat expressions in initializers specially as they can't trap. */
if (int_const_or_overflow)
ret = (require_constant_value
? fold_build2_initializer_loc (location, resultcode, build_type,
op0, op1)
: fold_build2_loc (location, resultcode, build_type, op0, op1));
else
ret = build2 (resultcode, build_type, op0, op1);
if (final_type != NULL_TREE)
ret = convert (final_type, ret);
return_build_binary_op:
gcc_assert (ret != error_mark_node);
if (TREE_CODE (ret) == INTEGER_CST && !TREE_OVERFLOW (ret) && !int_const)
ret = (int_operands
? note_integer_operands (ret)
: build1 (NOP_EXPR, TREE_TYPE (ret), ret));
else if (TREE_CODE (ret) != INTEGER_CST && int_operands
&& !in_late_binary_op)
ret = note_integer_operands (ret);
protected_set_expr_location (ret, location);
if (instrument_expr != NULL)
ret = fold_build2 (COMPOUND_EXPR, TREE_TYPE (ret),
instrument_expr, ret);
if (semantic_result_type)
ret = build1_loc (location, EXCESS_PRECISION_EXPR,
semantic_result_type, ret);
return ret;
}
/* Convert EXPR to be a truth-value, validating its type for this
purpose. LOCATION is the source location for the expression. */
tree
c_objc_common_truthvalue_conversion (location_t location, tree expr)
{
  bool int_const, int_operands;

  /* First reject types that have no scalar truth value, each with a
     type-specific diagnostic.  Pointers to GCC builtins are rejected
     via reject_gcc_builtin.  */
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case ARRAY_TYPE:
      error_at (location, "used array that cannot be converted to pointer where scalar is required");
      return error_mark_node;

    case RECORD_TYPE:
      error_at (location, "used struct type value where scalar is required");
      return error_mark_node;

    case UNION_TYPE:
      error_at (location, "used union type value where scalar is required");
      return error_mark_node;

    case VOID_TYPE:
      error_at (location, "void value not ignored as it ought to be");
      return error_mark_node;

    case POINTER_TYPE:
      if (reject_gcc_builtin (expr))
	return error_mark_node;
      break;

    case FUNCTION_TYPE:
      /* Function designators are handled before this point is
	 reached.  */
      gcc_unreachable ();

    case VECTOR_TYPE:
      error_at (location, "used vector type where scalar is required");
      return error_mark_node;

    default:
      break;
    }

  /* Remember whether EXPR is already an unoverflowed integer constant,
     and whether it was built from integer constant operands.  */
  int_const = (TREE_CODE (expr) == INTEGER_CST && !TREE_OVERFLOW (expr));
  int_operands = EXPR_INT_CONST_OPERANDS (expr);
  if (int_operands && TREE_CODE (expr) != INTEGER_CST)
    {
      /* Build the EXPR != 0 comparison directly so that the
	 "built from integer constant operands" property can be
	 re-attached to the result.  */
      expr = remove_c_maybe_const_expr (expr);
      expr = build2 (NE_EXPR, integer_type_node, expr,
		     convert (TREE_TYPE (expr), integer_zero_node));
      expr = note_integer_operands (expr);
    }
  else
    /* ??? Should we also give an error for vectors rather than leaving
       those to give errors later?  */
    expr = c_common_truthvalue_conversion (location, expr);

  /* If the conversion produced an integer constant that EXPR was not
     originally, keep the integer-operands marking (or the overflow).  */
  if (TREE_CODE (expr) == INTEGER_CST && int_operands && !int_const)
    {
      if (TREE_OVERFLOW (expr))
	return expr;
      else
	return note_integer_operands (expr);
    }
  /* Wrap a newly-folded constant in a NOP_EXPR so it is not mistaken
     for a constant expression.  */
  if (TREE_CODE (expr) == INTEGER_CST && !int_const)
    return build1 (NOP_EXPR, TREE_TYPE (expr), expr);
  return expr;
}
/* Convert EXPR to a contained DECL, updating *TC, *TI and *SE as
required. */
tree
c_expr_to_decl (tree expr, bool *tc ATTRIBUTE_UNUSED, bool *se)
{
  /* Only a compound literal decays to an underlying DECL; anything
     else is returned unchanged.  */
  if (TREE_CODE (expr) != COMPOUND_LITERAL_EXPR)
    return expr;

  tree decl = COMPOUND_LITERAL_EXPR_DECL (expr);
  /* Executing a compound literal inside a function reinitializes it,
     so a non-static one has side effects.  */
  if (!TREE_STATIC (decl))
    *se = true;
  return decl;
}
/* Generate OMP construct CODE, with BODY and CLAUSES as its compound
statement. LOC is the location of the construct. */
tree
c_finish_omp_construct (location_t loc, enum tree_code code, tree body,
			tree clauses)
{
  /* Close the compound statement collecting the construct's body.  */
  tree compound = c_end_compound_stmt (loc, body, true);

  /* Build the construct node of kind CODE and attach body, clauses
     and location.  */
  tree stmt = make_node (code);
  TREE_TYPE (stmt) = void_type_node;
  OMP_BODY (stmt) = compound;
  OMP_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}
/* Generate OACC_DATA, with CLAUSES and BLOCK as its compound
statement. LOC is the location of the OACC_DATA. */
tree
c_finish_oacc_data (location_t loc, tree clauses, tree block)
{
  /* Finish collecting the body, then wrap it in an OACC_DATA node.  */
  tree body = c_end_compound_stmt (loc, block, true);
  tree stmt = make_node (OACC_DATA);

  TREE_TYPE (stmt) = void_type_node;
  OACC_DATA_CLAUSES (stmt) = clauses;
  OACC_DATA_BODY (stmt) = body;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}
/* Generate OACC_HOST_DATA, with CLAUSES and BLOCK as its compound
statement. LOC is the location of the OACC_HOST_DATA. */
tree
c_finish_oacc_host_data (location_t loc, tree clauses, tree block)
{
  /* Finish collecting the body, then wrap it in an OACC_HOST_DATA
     node.  */
  tree body = c_end_compound_stmt (loc, block, true);
  tree stmt = make_node (OACC_HOST_DATA);

  TREE_TYPE (stmt) = void_type_node;
  OACC_HOST_DATA_CLAUSES (stmt) = clauses;
  OACC_HOST_DATA_BODY (stmt) = body;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}
/* Like c_begin_compound_stmt, except force the retention of the BLOCK. */
tree
c_begin_omp_parallel (void)
{
  /* Force retention of the BLOCK for the parallel body.  */
  keep_next_level ();
  return c_begin_compound_stmt (true);
}
/* Generate OMP_PARALLEL, with CLAUSES and BLOCK as its compound
statement. LOC is the location of the OMP_PARALLEL. */
tree
c_finish_omp_parallel (location_t loc, tree clauses, tree block)
{
  /* Close the compound statement that holds the parallel body.  */
  tree body = c_end_compound_stmt (loc, block, true);
  tree stmt = make_node (OMP_PARALLEL);

  TREE_TYPE (stmt) = void_type_node;
  OMP_PARALLEL_CLAUSES (stmt) = clauses;
  OMP_PARALLEL_BODY (stmt) = body;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}
/* Like c_begin_compound_stmt, except force the retention of the BLOCK. */
tree
c_begin_omp_task (void)
{
  /* Force retention of the BLOCK for the task body.  */
  keep_next_level ();
  return c_begin_compound_stmt (true);
}
/* Generate OMP_TASK, with CLAUSES and BLOCK as its compound
statement. LOC is the location of the #pragma. */
tree
c_finish_omp_task (location_t loc, tree clauses, tree block)
{
  /* Close the compound statement that holds the task body.  */
  tree body = c_end_compound_stmt (loc, block, true);
  tree stmt = make_node (OMP_TASK);

  TREE_TYPE (stmt) = void_type_node;
  OMP_TASK_CLAUSES (stmt) = clauses;
  OMP_TASK_BODY (stmt) = body;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}
/* Generate GOMP_cancel call for #pragma omp cancel. */
void
c_finish_omp_cancel (location_t loc, tree clauses)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
  int mask = 0;
  /* Translate the construct-kind clause into the bitmask argument that
     GOMP_cancel expects; exactly one kind must be present.  */
  if (omp_find_clause (clauses, OMP_CLAUSE_PARALLEL))
    mask = 1;
  else if (omp_find_clause (clauses, OMP_CLAUSE_FOR))
    mask = 2;
  else if (omp_find_clause (clauses, OMP_CLAUSE_SECTIONS))
    mask = 4;
  else if (omp_find_clause (clauses, OMP_CLAUSE_TASKGROUP))
    mask = 8;
  else
    {
      error_at (loc, "%<#pragma omp cancel%> must specify one of "
		     "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> "
		     "clauses");
      return;
    }
  tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF);
  if (ifc != NULL_TREE)
    {
      /* The if clause on cancel must have no modifier (VOID_CST) or
	 the %<cancel%> modifier (represented as ERROR_MARK here);
	 anything else is diagnosed.  */
      if (OMP_CLAUSE_IF_MODIFIER (ifc) != ERROR_MARK
	  && OMP_CLAUSE_IF_MODIFIER (ifc) != VOID_CST)
	error_at (OMP_CLAUSE_LOCATION (ifc),
		  "expected %<cancel%> %<if%> clause modifier");
      else
	{
	  /* If a second if clause follows the first acceptable one,
	     the assert documents the invariant that the first was
	     unmodified and the second carries a wrong modifier.  */
	  tree ifc2 = omp_find_clause (OMP_CLAUSE_CHAIN (ifc), OMP_CLAUSE_IF);
	  if (ifc2 != NULL_TREE)
	    {
	      gcc_assert (OMP_CLAUSE_IF_MODIFIER (ifc) == VOID_CST
			  && OMP_CLAUSE_IF_MODIFIER (ifc2) != ERROR_MARK
			  && OMP_CLAUSE_IF_MODIFIER (ifc2) != VOID_CST);
	      error_at (OMP_CLAUSE_LOCATION (ifc2),
			"expected %<cancel%> %<if%> clause modifier");
	    }
	}
      /* Reduce the if expression to a boolean: EXPR != 0.  */
      tree type = TREE_TYPE (OMP_CLAUSE_IF_EXPR (ifc));
      ifc = fold_build2_loc (OMP_CLAUSE_LOCATION (ifc), NE_EXPR,
			     boolean_type_node, OMP_CLAUSE_IF_EXPR (ifc),
			     build_zero_cst (type));
    }
  else
    /* No if clause: the cancellation request is unconditional.  */
    ifc = boolean_true_node;
  /* Emit GOMP_cancel (mask, if-condition).  */
  tree stmt = build_call_expr_loc (loc, fn, 2,
				   build_int_cst (integer_type_node, mask),
				   ifc);
  add_stmt (stmt);
}
/* Generate GOMP_cancellation_point call for
   #pragma omp cancellation point.  */

void
c_finish_omp_cancellation_point (location_t loc, tree clauses)
{
  /* Map the construct-selector clause to the runtime mask bit:
     parallel 1, for 2, sections 4, taskgroup 8.  */
  static const enum omp_clause_code codes[4]
    = { OMP_CLAUSE_PARALLEL, OMP_CLAUSE_FOR, OMP_CLAUSE_SECTIONS,
	OMP_CLAUSE_TASKGROUP };
  int mask = 0;
  for (int i = 0; i < 4; i++)
    if (omp_find_clause (clauses, codes[i]))
      {
	mask = 1 << i;
	break;
      }
  if (mask == 0)
    {
      error_at (loc, "%<#pragma omp cancellation point%> must specify one of "
		     "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> "
		     "clauses");
      return;
    }
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCELLATION_POINT);
  add_stmt (build_call_expr_loc (loc, fn, 1,
				 build_int_cst (integer_type_node, mask)));
}
/* Helper function for handle_omp_array_sections. Called recursively
to handle multiple array-section-subscripts. C is the clause,
T current expression (initially OMP_CLAUSE_DECL), which is either
a TREE_LIST for array-section-subscript (TREE_PURPOSE is low-bound
expression if specified, TREE_VALUE length expression if specified,
TREE_CHAIN is what it has been specified after, or some decl.
TYPES vector is populated with array section types, MAYBE_ZERO_LEN
set to true if any of the array-section-subscript could have length
of zero (explicit or implicit), FIRST_NON_ONE is the index of the
first array-section-subscript which is known not to have length
of one. Given say:
map(a[:b][2:1][:c][:2][:d][e:f][2:5])
FIRST_NON_ONE will be 3, array-section-subscript [:b], [2:1] and [:c]
all are or may have length of 1, array-section-subscript [:2] is the
first one known not to have length 1. For array-section-subscript
<= FIRST_NON_ONE we diagnose non-contiguous arrays if low bound isn't
0 or length isn't the array domain max + 1, for > FIRST_NON_ONE we
can if MAYBE_ZERO_LEN is false. MAYBE_ZERO_LEN will be true in the above
case though, as some lengths could be zero. */
static tree
handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
			     bool &maybe_zero_len, unsigned int &first_non_one,
			     enum c_omp_region_type ort)
{
  tree ret, low_bound, length, type;
  if (TREE_CODE (t) != TREE_LIST)
    {
      /* Base of the array section reached: T is the underlying decl or
	 expression rather than another array-section-subscript.  */
      if (error_operand_p (t))
	return error_mark_node;
      ret = t;
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	  && TYPE_ATOMIC (strip_array_types (TREE_TYPE (t))))
	{
	  error_at (OMP_CLAUSE_LOCATION (c), "%<_Atomic%> %qE in %qs clause",
		    t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (TREE_CODE (t) == COMPONENT_REF
	  && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FROM))
	{
	  /* map/to/from of a struct member: bit-fields and members of
	     unions cannot be mapped; strip the COMPONENT_REFs down to
	     the containing object.  */
	  if (DECL_BIT_FIELD (TREE_OPERAND (t, 1)))
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"bit-field %qE in %qs clause",
			t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	      return error_mark_node;
	    }
	  while (TREE_CODE (t) == COMPONENT_REF)
	    {
	      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE)
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "%qE is a member of a union", t);
		  return error_mark_node;
		}
	      t = TREE_OPERAND (t, 0);
	      if (ort == C_ORT_ACC && TREE_CODE (t) == MEM_REF)
		{
		  /* OpenACC: only a zero-offset dereference of the base
		     is looked through; anything else is diagnosed.  */
		  if (maybe_ne (mem_ref_offset (t), 0))
		    error_at (OMP_CLAUSE_LOCATION (c),
			      "cannot dereference %qE in %qs clause", t,
			      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  else
		    t = TREE_OPERAND (t, 0);
		}
	    }
	}
      /* After stripping, the base must be an ordinary variable or
	 parameter.  */
      if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
	{
	  if (DECL_P (t))
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%qD is not a variable in %qs clause", t,
		      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  else
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%qE is not a variable in %qs clause", t,
		      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	       && TYPE_ATOMIC (TREE_TYPE (t)))
	{
	  error_at (OMP_CLAUSE_LOCATION (c), "%<_Atomic%> %qD in %qs clause",
		    t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	       && VAR_P (t)
	       && DECL_THREAD_LOCAL_P (t))
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "%qD is threadprivate variable in %qs clause", t,
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	  && TYPE_ATOMIC (TREE_TYPE (t))
	  && POINTER_TYPE_P (TREE_TYPE (t)))
	{
	  /* If the array section is pointer based and the pointer
	     itself is _Atomic qualified, we need to atomically load
	     the pointer.  */
	  c_expr expr;
	  memset (&expr, 0, sizeof (expr));
	  expr.value = ret;
	  expr = convert_lvalue_to_rvalue (OMP_CLAUSE_LOCATION (c),
					   expr, false, false);
	  ret = expr.value;
	}
      return ret;
    }
  /* T is an array-section-subscript TREE_LIST: recurse on the inner
     part first, then validate this subscript's bounds against the type
     RET produced there.  */
  ret = handle_omp_array_sections_1 (c, TREE_CHAIN (t), types,
				     maybe_zero_len, first_non_one, ort);
  if (ret == error_mark_node || ret == NULL_TREE)
    return ret;
  type = TREE_TYPE (ret);
  low_bound = TREE_PURPOSE (t);
  length = TREE_VALUE (t);
  if (low_bound == error_mark_node || length == error_mark_node)
    return error_mark_node;
  if (low_bound && !INTEGRAL_TYPE_P (TREE_TYPE (low_bound)))
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"low bound %qE of array section does not have integral type",
		low_bound);
      return error_mark_node;
    }
  if (length && !INTEGRAL_TYPE_P (TREE_TYPE (length)))
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"length %qE of array section does not have integral type",
		length);
      return error_mark_node;
    }
  /* Narrow over-wide constant bounds to sizetype so the INTEGER_CST
     comparisons below are meaningful.  */
  if (low_bound
      && TREE_CODE (low_bound) == INTEGER_CST
      && TYPE_PRECISION (TREE_TYPE (low_bound))
	 > TYPE_PRECISION (sizetype))
    low_bound = fold_convert (sizetype, low_bound);
  if (length
      && TREE_CODE (length) == INTEGER_CST
      && TYPE_PRECISION (TREE_TYPE (length))
	 > TYPE_PRECISION (sizetype))
    length = fold_convert (sizetype, length);
  if (low_bound == NULL_TREE)
    low_bound = integer_zero_node;
  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
      && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
	  || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH))
    {
      /* attach/detach act on a single pointer, i.e. a section of
	 length exactly 1.  */
      if (length != integer_one_node)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "expected single pointer in %qs clause",
		    c_omp_map_clause_name (c, ort == C_ORT_ACC));
	  return error_mark_node;
	}
    }
  if (length != NULL_TREE)
    {
      if (!integer_nonzerop (length))
	{
	  /* depend and the reduction clauses require a non-zero length;
	     for other clauses a (potentially) zero length is merely
	     recorded for the caller's contiguity checks.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
	    {
	      if (integer_zerop (length))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "zero length array section in %qs clause",
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	    }
	  else
	    maybe_zero_len = true;
	}
      /* This subscript is or may be of length 1 - push FIRST_NON_ONE
	 past it (types.length () is this subscript's index, as we have
	 not pushed our type yet).  */
      if (first_non_one == types.length ()
	  && (TREE_CODE (length) != INTEGER_CST || integer_onep (length)))
	first_non_one++;
    }
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (length == NULL_TREE
	  && (TYPE_DOMAIN (type) == NULL_TREE
	      || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE))
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "for unknown bound array type length expression must "
		    "be specified");
	  return error_mark_node;
	}
      if (TREE_CODE (low_bound) == INTEGER_CST
	  && tree_int_cst_sgn (low_bound) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative low bound in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (length != NULL_TREE
	  && TREE_CODE (length) == INTEGER_CST
	  && tree_int_cst_sgn (length) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative length in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (TYPE_DOMAIN (type)
	  && TYPE_MAX_VALUE (TYPE_DOMAIN (type))
	  && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
	     == INTEGER_CST)
	{
	  /* Known-extent dimension: SIZE is its element count
	     (max index + 1); range-check constant bounds against it.  */
	  tree size
	    = fold_convert (sizetype, TYPE_MAX_VALUE (TYPE_DOMAIN (type)));
	  size = size_binop (PLUS_EXPR, size, size_one_node);
	  if (TREE_CODE (low_bound) == INTEGER_CST)
	    {
	      if (tree_int_cst_lt (size, low_bound))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "low bound %qE above array section size "
			    "in %qs clause", low_bound,
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	      if (tree_int_cst_equal (size, low_bound))
		{
		  /* [size:] starts one past the end, so the implicit
		     length is zero.  */
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"zero length array section in %qs clause",
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return error_mark_node;
		    }
		  maybe_zero_len = true;
		}
	      else if (length == NULL_TREE
		       && first_non_one == types.length ()
		       && tree_int_cst_equal
			    (TYPE_MAX_VALUE (TYPE_DOMAIN (type)),
			     low_bound))
		/* [max:] has implicit length 1.  */
		first_non_one++;
	    }
	  else if (length == NULL_TREE)
	    {
	      /* [lb:] with non-constant lb: length is unknown, so it
		 may be zero and may be one.  */
	      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
		  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
		  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IN_REDUCTION
		  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_TASK_REDUCTION)
		maybe_zero_len = true;
	      if (first_non_one == types.length ())
		first_non_one++;
	    }
	  if (length && TREE_CODE (length) == INTEGER_CST)
	    {
	      if (tree_int_cst_lt (size, length))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "length %qE above array section size "
			    "in %qs clause", length,
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	      if (TREE_CODE (low_bound) == INTEGER_CST)
		{
		  tree lbpluslen
		    = size_binop (PLUS_EXPR,
				  fold_convert (sizetype, low_bound),
				  fold_convert (sizetype, length));
		  if (TREE_CODE (lbpluslen) == INTEGER_CST
		      && tree_int_cst_lt (size, lbpluslen))
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"high bound %qE above array section size "
				"in %qs clause", lbpluslen,
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return error_mark_node;
		    }
		}
	    }
	}
      else if (length == NULL_TREE)
	{
	  /* Unknown-extent dimension with implicit length.  */
	  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	      && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IN_REDUCTION
	      && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_TASK_REDUCTION)
	    maybe_zero_len = true;
	  if (first_non_one == types.length ())
	    first_non_one++;
	}
      /* For [lb:] we will need to evaluate lb more than once.  */
      if (length == NULL_TREE && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
	{
	  tree lb = save_expr (low_bound);
	  if (lb != low_bound)
	    {
	      TREE_PURPOSE (t) = lb;
	      low_bound = lb;
	    }
	}
    }
  else if (TREE_CODE (type) == POINTER_TYPE)
    {
      if (length == NULL_TREE)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "for pointer type length expression must be specified");
	  return error_mark_node;
	}
      if (length != NULL_TREE
	  && TREE_CODE (length) == INTEGER_CST
	  && tree_int_cst_sgn (length) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative length in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      /* If there is a pointer type anywhere but in the very first
	 array-section-subscript, the array section can't be contiguous.  */
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	  && TREE_CODE (TREE_CHAIN (t)) == TREE_LIST)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "array section is not contiguous in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
    }
  else
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"%qE does not have pointer or array type", ret);
      return error_mark_node;
    }
  /* Record this subscript's base type for the caller's contiguity and
     size computations (depend does not need them).  */
  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
    types.safe_push (TREE_TYPE (ret));
  /* We will need to evaluate lb more than once.  */
  tree lb = save_expr (low_bound);
  if (lb != low_bound)
    {
      TREE_PURPOSE (t) = lb;
      low_bound = lb;
    }
  /* Return RET[low_bound], the first element of this subscript's slice.  */
  ret = build_array_ref (OMP_CLAUSE_LOCATION (c), ret, low_bound);
  return ret;
}
/* Handle array sections for clause C.  ORT identifies the kind of
   region (OpenMP, OpenACC, declare simd, ...).  Returns true if an
   error was diagnosed and the clause should be removed.  */
static bool
handle_omp_array_sections (tree c, enum c_omp_region_type ort)
{
  bool maybe_zero_len = false;
  unsigned int first_non_one = 0;
  auto_vec<tree, 10> types;
  tree *tp = &OMP_CLAUSE_DECL (c);
  /* depend clauses with iterators wrap the decl in an extra TREE_LIST
     whose TREE_PURPOSE is the iterator TREE_VEC; look through it.  */
  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
      && TREE_CODE (*tp) == TREE_LIST
      && TREE_PURPOSE (*tp)
      && TREE_CODE (TREE_PURPOSE (*tp)) == TREE_VEC)
    tp = &TREE_VALUE (*tp);
  tree first = handle_omp_array_sections_1 (c, *tp, types,
					    maybe_zero_len, first_non_one,
					    ort);
  if (first == error_mark_node)
    return true;
  if (first == NULL_TREE)
    return false;
  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
    {
      tree t = *tp;
      tree tem = NULL_TREE;
      /* Need to evaluate side effects in the length expressions
	 if any.  */
      while (TREE_CODE (t) == TREE_LIST)
	{
	  if (TREE_VALUE (t) && TREE_SIDE_EFFECTS (TREE_VALUE (t)))
	    {
	      if (tem == NULL_TREE)
		tem = TREE_VALUE (t);
	      else
		tem = build2 (COMPOUND_EXPR, TREE_TYPE (tem),
			      TREE_VALUE (t), tem);
	    }
	  t = TREE_CHAIN (t);
	}
      if (tem)
	first = build2 (COMPOUND_EXPR, TREE_TYPE (first), tem, first);
      first = c_fully_fold (first, false, NULL, true);
      *tp = first;
    }
  else
    {
      unsigned int num = types.length (), i;
      tree t, side_effects = NULL_TREE, size = NULL_TREE;
      tree condition = NULL_TREE;
      /* Walk the subscripts outermost-first (I counts down to the
	 innermost), computing the section's total SIZE in bytes and
	 diagnosing non-contiguous sections.  */
      if (int_size_in_bytes (TREE_TYPE (first)) <= 0)
	maybe_zero_len = true;
      for (i = num, t = OMP_CLAUSE_DECL (c); i > 0;
	   t = TREE_CHAIN (t))
	{
	  tree low_bound = TREE_PURPOSE (t);
	  tree length = TREE_VALUE (t);
	  i--;
	  if (low_bound
	      && TREE_CODE (low_bound) == INTEGER_CST
	      && TYPE_PRECISION (TREE_TYPE (low_bound))
		 > TYPE_PRECISION (sizetype))
	    low_bound = fold_convert (sizetype, low_bound);
	  if (length
	      && TREE_CODE (length) == INTEGER_CST
	      && TYPE_PRECISION (TREE_TYPE (length))
		 > TYPE_PRECISION (sizetype))
	    length = fold_convert (sizetype, length);
	  if (low_bound == NULL_TREE)
	    low_bound = integer_zero_node;
	  if (!maybe_zero_len && i > first_non_one)
	    {
	      /* Inner subscripts known not to be of length 1 must cover
		 their whole dimension, or the section is not
		 contiguous.  */
	      if (integer_nonzerop (low_bound))
		goto do_warn_noncontiguous;
	      if (length != NULL_TREE
		  && TREE_CODE (length) == INTEGER_CST
		  && TYPE_DOMAIN (types[i])
		  && TYPE_MAX_VALUE (TYPE_DOMAIN (types[i]))
		  && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])))
		     == INTEGER_CST)
		{
		  tree size;
		  size = size_binop (PLUS_EXPR,
				     TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
				     size_one_node);
		  if (!tree_int_cst_equal (length, size))
		    {
		     do_warn_noncontiguous:
		      error_at (OMP_CLAUSE_LOCATION (c),
				"array section is not contiguous in %qs "
				"clause",
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return true;
		    }
		}
	      if (length != NULL_TREE
		  && TREE_SIDE_EFFECTS (length))
		{
		  /* Full-dimension lengths do not enter the size
		     computation; keep their side effects anyway.  */
		  if (side_effects == NULL_TREE)
		    side_effects = length;
		  else
		    side_effects = build2 (COMPOUND_EXPR,
					   TREE_TYPE (side_effects),
					   length, side_effects);
		}
	    }
	  else
	    {
	      tree l;
	      if (i > first_non_one
		  && ((length && integer_nonzerop (length))
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION))
		continue;
	      if (length)
		l = fold_convert (sizetype, length);
	      else
		{
		  /* Implicit length: whole dimension minus the low
		     bound.  */
		  l = size_binop (PLUS_EXPR,
				  TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
				  size_one_node);
		  l = size_binop (MINUS_EXPR, l,
				  fold_convert (sizetype, low_bound));
		}
	      if (i > first_non_one)
		{
		  /* CONDITION accumulates L != 0 tests: the section has
		     any elements only if all inner lengths are
		     non-zero.  */
		  l = fold_build2 (NE_EXPR, boolean_type_node, l,
				   size_zero_node);
		  if (condition == NULL_TREE)
		    condition = l;
		  else
		    condition = fold_build2 (BIT_AND_EXPR, boolean_type_node,
					     l, condition);
		}
	      else if (size == NULL_TREE)
		{
		  size = size_in_bytes (TREE_TYPE (types[i]));
		  tree eltype = TREE_TYPE (types[num - 1]);
		  while (TREE_CODE (eltype) == ARRAY_TYPE)
		    eltype = TREE_TYPE (eltype);
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
		    {
		      if (integer_zerop (size)
			  || integer_zerop (size_in_bytes (eltype)))
			{
			  error_at (OMP_CLAUSE_LOCATION (c),
				    "zero length array section in %qs clause",
				    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
			  /* This function returns bool; the old
			     "return error_mark_node;" here only worked
			     via implicit pointer-to-bool conversion.  */
			  return true;
			}
		      /* For reductions, SIZE counts elements, not
			 bytes.  */
		      size = size_binop (EXACT_DIV_EXPR, size,
					 size_in_bytes (eltype));
		    }
		  size = size_binop (MULT_EXPR, size, l);
		  if (condition)
		    size = fold_build3 (COND_EXPR, sizetype, condition,
					size, size_zero_node);
		}
	      else
		size = size_binop (MULT_EXPR, size, l);
	    }
	}
      if (side_effects)
	size = build2 (COMPOUND_EXPR, sizetype, side_effects, size);
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
	  || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
	{
	  /* Rewrite the reduction decl as a MEM_REF with array type
	     covering the whole section, offset from the base T to the
	     section's first element FIRST.  */
	  size = size_binop (MINUS_EXPR, size, size_one_node);
	  size = c_fully_fold (size, false, NULL);
	  size = save_expr (size);
	  tree index_type = build_index_type (size);
	  tree eltype = TREE_TYPE (first);
	  while (TREE_CODE (eltype) == ARRAY_TYPE)
	    eltype = TREE_TYPE (eltype);
	  tree type = build_array_type (eltype, index_type);
	  tree ptype = build_pointer_type (eltype);
	  if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
	    t = build_fold_addr_expr (t);
	  tree t2 = build_fold_addr_expr (first);
	  t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
				 ptrdiff_type_node, t2);
	  t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
				ptrdiff_type_node, t2,
				fold_convert_loc (OMP_CLAUSE_LOCATION (c),
						  ptrdiff_type_node, t));
	  t2 = c_fully_fold (t2, false, NULL);
	  if (tree_fits_shwi_p (t2))
	    t = build2 (MEM_REF, type, t,
			build_int_cst (ptype, tree_to_shwi (t2)));
	  else
	    {
	      /* Non-constant offset: add it to the pointer instead.  */
	      t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c), sizetype, t2);
	      t = build2_loc (OMP_CLAUSE_LOCATION (c), POINTER_PLUS_EXPR,
			      TREE_TYPE (t), t, t2);
	      t = build2 (MEM_REF, type, t, build_int_cst (ptype, 0));
	    }
	  OMP_CLAUSE_DECL (c) = t;
	  return false;
	}
      first = c_fully_fold (first, false, NULL);
      OMP_CLAUSE_DECL (c) = first;
      if (size)
	size = c_fully_fold (size, false, NULL);
      OMP_CLAUSE_SIZE (c) = size;
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
	  || (TREE_CODE (t) == COMPONENT_REF
	      && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE))
	return false;
      gcc_assert (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FORCE_DEVICEPTR);
      if (ort == C_ORT_OMP || ort == C_ORT_ACC)
	switch (OMP_CLAUSE_MAP_KIND (c))
	  {
	  case GOMP_MAP_ALLOC:
	  case GOMP_MAP_IF_PRESENT:
	  case GOMP_MAP_TO:
	  case GOMP_MAP_FROM:
	  case GOMP_MAP_TOFROM:
	  case GOMP_MAP_ALWAYS_TO:
	  case GOMP_MAP_ALWAYS_FROM:
	  case GOMP_MAP_ALWAYS_TOFROM:
	  case GOMP_MAP_RELEASE:
	  case GOMP_MAP_DELETE:
	  case GOMP_MAP_FORCE_TO:
	  case GOMP_MAP_FORCE_FROM:
	  case GOMP_MAP_FORCE_TOFROM:
	  case GOMP_MAP_FORCE_PRESENT:
	    OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c) = 1;
	    break;
	  default:
	    break;
	  }
      /* Chain a second map clause after C carrying the base pointer and
	 the byte offset of the section's first element from it.  */
      tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP);
      if (ort != C_ORT_OMP && ort != C_ORT_ACC)
	OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_POINTER);
      else if (TREE_CODE (t) == COMPONENT_REF)
	{
	  gomp_map_kind k = (ort == C_ORT_ACC) ? GOMP_MAP_ATTACH_DETACH
					       : GOMP_MAP_ALWAYS_POINTER;
	  OMP_CLAUSE_SET_MAP_KIND (c2, k);
	}
      else
	OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_FIRSTPRIVATE_POINTER);
      if (OMP_CLAUSE_MAP_KIND (c2) != GOMP_MAP_FIRSTPRIVATE_POINTER
	  && !c_mark_addressable (t))
	return false;
      OMP_CLAUSE_DECL (c2) = t;
      t = build_fold_addr_expr (first);
      t = fold_convert_loc (OMP_CLAUSE_LOCATION (c), ptrdiff_type_node, t);
      tree ptr = OMP_CLAUSE_DECL (c2);
      if (!POINTER_TYPE_P (TREE_TYPE (ptr)))
	ptr = build_fold_addr_expr (ptr);
      t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
			   ptrdiff_type_node, t,
			   fold_convert_loc (OMP_CLAUSE_LOCATION (c),
					     ptrdiff_type_node, ptr));
      t = c_fully_fold (t, false, NULL);
      OMP_CLAUSE_SIZE (c2) = t;
      OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c);
      OMP_CLAUSE_CHAIN (c) = c2;
    }
  return false;
}
/* Helper function of finish_omp_clauses.  Clone STMT as if we were making
   an inline call, remapping the OMP_DECL1 VAR_DECL (omp_out resp.
   omp_orig) to PLACEHOLDER and the OMP_DECL2 VAR_DECL (omp_in resp.
   omp_priv) to DECL.  Returns the copied statement tree.  */
static tree
c_clone_omp_udr (tree stmt, tree omp_decl1, tree omp_decl2,
		 tree decl, tree placeholder)
{
  /* Substitution table mapping the UDR's special variables to the
     decls the caller wants in the clone.  */
  hash_map<tree, tree> remap;
  remap.put (omp_decl1, placeholder);
  remap.put (omp_decl2, decl);

  copy_body_data id;
  memset (&id, 0, sizeof (id));
  id.src_fn = DECL_CONTEXT (omp_decl1);
  id.src_cfun = DECL_STRUCT_FUNCTION (id.src_fn);
  id.dst_fn = current_function_decl;
  id.decl_map = &remap;
  id.copy_decl = copy_decl_no_change;
  id.transform_call_graph_edges = CB_CGE_DUPLICATE;
  id.transform_new_cfg = true;
  id.transform_return_to_modify = false;
  id.transform_lang_insert_block = NULL;
  id.eh_lp_nr = 0;
  walk_tree (&stmt, copy_tree_body_r, &id, NULL);
  return stmt;
}
/* Helper function of c_finish_omp_clauses, called via walk_tree.
   Find OMP_CLAUSE_PLACEHOLDER (passed in DATA) in *TP.  */
static tree
c_find_omp_placeholder_r (tree *tp, int *, void *data)
{
  /* Returning non-NULL stops the walk at the placeholder.  */
  return *tp == (tree) data ? *tp : NULL_TREE;
}
/* Similarly, but also walk aggregate fields.  */
struct c_find_omp_var_s { tree var; hash_set<tree> *pset; };
static tree
c_find_omp_var_r (tree *tp, int *, void *data)
{
  struct c_find_omp_var_s *d = (struct c_find_omp_var_s *) data;
  if (*tp == d->var)
    return *tp;
  if (RECORD_OR_UNION_TYPE_P (*tp))
    {
      /* Look for the variable inside each field's layout expressions
	 and type, in declaration order.  */
      for (tree fld = TYPE_FIELDS (*tp); fld; fld = DECL_CHAIN (fld))
	{
	  if (TREE_CODE (fld) != FIELD_DECL)
	    continue;
	  tree *sub[4] = { &DECL_FIELD_OFFSET (fld), &DECL_SIZE (fld),
			   &DECL_SIZE_UNIT (fld), &TREE_TYPE (fld) };
	  for (int j = 0; j < 4; j++)
	    {
	      tree ret = walk_tree (sub[j], c_find_omp_var_r, data, d->pset);
	      if (ret)
		return ret;
	    }
	}
    }
  else if (INTEGRAL_TYPE_P (*tp))
    /* VLA-style bounds can mention the variable too.  */
    return walk_tree (&TYPE_MAX_VALUE (*tp), c_find_omp_var_r, data,
		      d->pset);
  return NULL_TREE;
}
/* Finish OpenMP iterators ITER.  Return true if they are erroneous
   and clauses containing them should be removed.  */
static bool
c_omp_finish_iterators (tree iter)
{
  bool ret = false;
  /* Each iterator is a TREE_VEC (var, begin, end, step, ...); on
     success the folded begin/end/step and the original step are stored
     back into slots 1-4.  */
  for (tree it = iter; it; it = TREE_CHAIN (it))
    {
      tree var = TREE_VEC_ELT (it, 0);
      tree begin = TREE_VEC_ELT (it, 1);
      tree end = TREE_VEC_ELT (it, 2);
      tree step = TREE_VEC_ELT (it, 3);
      tree orig_step;
      tree type = TREE_TYPE (var);
      location_t loc = DECL_SOURCE_LOCATION (var);
      if (type == error_mark_node)
	{
	  ret = true;
	  continue;
	}
      if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
	{
	  error_at (loc, "iterator %qD has neither integral nor pointer type",
		    var);
	  ret = true;
	  continue;
	}
      else if (TYPE_ATOMIC (type))
	{
	  error_at (loc, "iterator %qD has %<_Atomic%> qualified type", var);
	  ret = true;
	  continue;
	}
      else if (TYPE_READONLY (type))
	{
	  error_at (loc, "iterator %qD has const qualified type", var);
	  ret = true;
	  continue;
	}
      else if (step == error_mark_node
	       || TREE_TYPE (step) == error_mark_node)
	{
	  ret = true;
	  continue;
	}
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (step)))
	{
	  error_at (EXPR_LOC_OR_LOC (step, loc),
		    "iterator step with non-integral type");
	  ret = true;
	  continue;
	}
      /* Fold begin/end to the iterator's type; for pointer iterators
	 cast the step to sizetype.  */
      begin = c_fully_fold (build_c_cast (loc, type, begin), false, NULL);
      end = c_fully_fold (build_c_cast (loc, type, end), false, NULL);
      orig_step = save_expr (c_fully_fold (step, false, NULL));
      tree stype = POINTER_TYPE_P (type) ? sizetype : type;
      step = c_fully_fold (build_c_cast (loc, stype, orig_step), false, NULL);
      if (POINTER_TYPE_P (type))
	{
	  /* Normalize a pointer iterator's step to the signed byte
	     difference (begin + step) - begin.  */
	  begin = save_expr (begin);
	  step = pointer_int_sum (loc, PLUS_EXPR, begin, step);
	  step = fold_build2_loc (loc, MINUS_EXPR, sizetype,
				  fold_convert (sizetype, step),
				  fold_convert (sizetype, begin));
	  step = fold_convert (ssizetype, step);
	}
      if (integer_zerop (step))
	{
	  error_at (loc, "iterator %qD has zero step", var);
	  ret = true;
	  continue;
	}
      if (begin == error_mark_node
	  || end == error_mark_node
	  || step == error_mark_node
	  || orig_step == error_mark_node)
	{
	  ret = true;
	  continue;
	}
      /* An inner iterator must not refer to an outer one anywhere in
	 its type or in its begin/end/step expressions.  */
      hash_set<tree> pset;
      tree it2;
      for (it2 = TREE_CHAIN (it); it2; it2 = TREE_CHAIN (it2))
	{
	  tree var2 = TREE_VEC_ELT (it2, 0);
	  tree begin2 = TREE_VEC_ELT (it2, 1);
	  tree end2 = TREE_VEC_ELT (it2, 2);
	  tree step2 = TREE_VEC_ELT (it2, 3);
	  tree type2 = TREE_TYPE (var2);
	  location_t loc2 = DECL_SOURCE_LOCATION (var2);
	  struct c_find_omp_var_s data = { var, &pset };
	  if (walk_tree (&type2, c_find_omp_var_r, &data, &pset))
	    {
	      error_at (loc2,
			"type of iterator %qD refers to outer iterator %qD",
			var2, var);
	      break;
	    }
	  else if (walk_tree (&begin2, c_find_omp_var_r, &data, &pset))
	    {
	      error_at (EXPR_LOC_OR_LOC (begin2, loc2),
			"begin expression refers to outer iterator %qD", var);
	      break;
	    }
	  else if (walk_tree (&end2, c_find_omp_var_r, &data, &pset))
	    {
	      error_at (EXPR_LOC_OR_LOC (end2, loc2),
			"end expression refers to outer iterator %qD", var);
	      break;
	    }
	  else if (walk_tree (&step2, c_find_omp_var_r, &data, &pset))
	    {
	      error_at (EXPR_LOC_OR_LOC (step2, loc2),
			"step expression refers to outer iterator %qD", var);
	      break;
	    }
	}
      /* IT2 non-null means the inner loop broke on a diagnostic.  */
      if (it2)
	{
	  ret = true;
	  continue;
	}
      TREE_VEC_ELT (it, 1) = begin;
      TREE_VEC_ELT (it, 2) = end;
      TREE_VEC_ELT (it, 3) = step;
      TREE_VEC_ELT (it, 4) = orig_step;
    }
  return ret;
}
/* Ensure that pointers are used in OpenACC attach and detach clauses.
   Return true if an error has been detected.  */
static bool
c_oacc_check_attachments (tree c)
{
  /* Only attach/detach map clauses are constrained here.  */
  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
      || (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ATTACH
	  && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_DETACH))
    return false;
  /* Skip any array-section-subscript TREE_LISTs to reach the base.  */
  tree t = OMP_CLAUSE_DECL (c);
  while (TREE_CODE (t) == TREE_LIST)
    t = TREE_CHAIN (t);
  if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
    return false;
  error_at (OMP_CLAUSE_LOCATION (c), "expected pointer in %qs clause",
	    c_omp_map_clause_name (c, true));
  return true;
}
/* For all elements of CLAUSES, validate them against their constraints.
Remove any elements from the list that are invalid. */
tree
c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
{
bitmap_head generic_head, firstprivate_head, lastprivate_head;
bitmap_head aligned_head, map_head, map_field_head, oacc_reduction_head;
tree c, t, type, *pc;
tree simdlen = NULL_TREE, safelen = NULL_TREE;
bool branch_seen = false;
bool copyprivate_seen = false;
bool linear_variable_step_check = false;
tree *nowait_clause = NULL;
tree ordered_clause = NULL_TREE;
tree schedule_clause = NULL_TREE;
bool oacc_async = false;
tree last_iterators = NULL_TREE;
bool last_iterators_remove = false;
tree *nogroup_seen = NULL;
tree *order_clause = NULL;
/* 1 if normal/task reduction has been seen, -1 if inscan reduction
has been seen, -2 if mixed inscan/normal reduction diagnosed. */
int reduction_seen = 0;
bitmap_obstack_initialize (NULL);
bitmap_initialize (&generic_head, &bitmap_default_obstack);
bitmap_initialize (&firstprivate_head, &bitmap_default_obstack);
bitmap_initialize (&lastprivate_head, &bitmap_default_obstack);
bitmap_initialize (&aligned_head, &bitmap_default_obstack);
/* If ort == C_ORT_OMP_DECLARE_SIMD used as uniform_head instead. */
bitmap_initialize (&map_head, &bitmap_default_obstack);
bitmap_initialize (&map_field_head, &bitmap_default_obstack);
/* If ort == C_ORT_OMP used as nontemporal_head or use_device_xxx_head
instead. */
bitmap_initialize (&oacc_reduction_head, &bitmap_default_obstack);
if (ort & C_ORT_ACC)
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ASYNC)
{
oacc_async = true;
break;
}
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
bool need_complete = false;
bool need_implicitly_determined = false;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
need_implicitly_determined = true;
goto check_dup_generic;
case OMP_CLAUSE_PRIVATE:
need_complete = true;
need_implicitly_determined = true;
goto check_dup_generic;
case OMP_CLAUSE_REDUCTION:
if (reduction_seen == 0)
reduction_seen = OMP_CLAUSE_REDUCTION_INSCAN (c) ? -1 : 1;
else if (reduction_seen != -2
&& reduction_seen != (OMP_CLAUSE_REDUCTION_INSCAN (c)
? -1 : 1))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<inscan%> and non-%<inscan%> %<reduction%> clauses "
"on the same construct");
reduction_seen = -2;
}
/* FALLTHRU */
case OMP_CLAUSE_IN_REDUCTION:
case OMP_CLAUSE_TASK_REDUCTION:
need_implicitly_determined = true;
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST)
{
if (handle_omp_array_sections (c, ort))
{
remove = true;
break;
}
t = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<inscan%> %<reduction%> clause with array "
"section");
remove = true;
break;
}
}
t = require_complete_type (OMP_CLAUSE_LOCATION (c), t);
if (t == error_mark_node)
{
remove = true;
break;
}
if (oacc_async)
c_mark_addressable (t);
type = TREE_TYPE (t);
if (TREE_CODE (t) == MEM_REF)
type = TREE_TYPE (type);
if (TREE_CODE (type) == ARRAY_TYPE)
{
tree oatype = type;
gcc_assert (TREE_CODE (t) != MEM_REF);
while (TREE_CODE (type) == ARRAY_TYPE)
type = TREE_TYPE (type);
if (integer_zerop (TYPE_SIZE_UNIT (type)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD in %<reduction%> clause is a zero size array",
t);
remove = true;
break;
}
tree size = size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (oatype),
TYPE_SIZE_UNIT (type));
if (integer_zerop (size))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD in %<reduction%> clause is a zero size array",
t);
remove = true;
break;
}
size = size_binop (MINUS_EXPR, size, size_one_node);
size = save_expr (size);
tree index_type = build_index_type (size);
tree atype = build_array_type (type, index_type);
tree ptype = build_pointer_type (type);
if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
t = build_fold_addr_expr (t);
t = build2 (MEM_REF, atype, t, build_int_cst (ptype, 0));
OMP_CLAUSE_DECL (c) = t;
}
if (TYPE_ATOMIC (type))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<_Atomic%> %qE in %<reduction%> clause", t);
remove = true;
break;
}
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_REDUCTION_TASK (c))
{
/* Disallow zero sized or potentially zero sized task
reductions. */
if (integer_zerop (TYPE_SIZE_UNIT (type)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"zero sized type %qT in %qs clause", type,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
break;
}
else if (TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
{
error_at (OMP_CLAUSE_LOCATION (c),
"variable sized type %qT in %qs clause", type,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
break;
}
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == NULL_TREE
&& (FLOAT_TYPE_P (type)
|| TREE_CODE (type) == COMPLEX_TYPE))
{
enum tree_code r_code = OMP_CLAUSE_REDUCTION_CODE (c);
const char *r_name = NULL;
switch (r_code)
{
case PLUS_EXPR:
case MULT_EXPR:
case MINUS_EXPR:
break;
case MIN_EXPR:
if (TREE_CODE (type) == COMPLEX_TYPE)
r_name = "min";
break;
case MAX_EXPR:
if (TREE_CODE (type) == COMPLEX_TYPE)
r_name = "max";
break;
case BIT_AND_EXPR:
r_name = "&";
break;
case BIT_XOR_EXPR:
r_name = "^";
break;
case BIT_IOR_EXPR:
r_name = "|";
break;
case TRUTH_ANDIF_EXPR:
if (FLOAT_TYPE_P (type))
r_name = "&&";
break;
case TRUTH_ORIF_EXPR:
if (FLOAT_TYPE_P (type))
r_name = "||";
break;
default:
gcc_unreachable ();
}
if (r_name)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE has invalid type for %<reduction(%s)%>",
t, r_name);
remove = true;
break;
}
}
else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == error_mark_node)
{
error_at (OMP_CLAUSE_LOCATION (c),
"user defined reduction not found for %qE", t);
remove = true;
break;
}
else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree list = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
type = TYPE_MAIN_VARIANT (type);
tree placeholder = build_decl (OMP_CLAUSE_LOCATION (c),
VAR_DECL, NULL_TREE, type);
tree decl_placeholder = NULL_TREE;
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = placeholder;
DECL_ARTIFICIAL (placeholder) = 1;
DECL_IGNORED_P (placeholder) = 1;
if (TREE_CODE (t) == MEM_REF)
{
decl_placeholder = build_decl (OMP_CLAUSE_LOCATION (c),
VAR_DECL, NULL_TREE, type);
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = decl_placeholder;
DECL_ARTIFICIAL (decl_placeholder) = 1;
DECL_IGNORED_P (decl_placeholder) = 1;
}
if (TREE_ADDRESSABLE (TREE_VEC_ELT (list, 0)))
c_mark_addressable (placeholder);
if (TREE_ADDRESSABLE (TREE_VEC_ELT (list, 1)))
c_mark_addressable (decl_placeholder ? decl_placeholder
: OMP_CLAUSE_DECL (c));
OMP_CLAUSE_REDUCTION_MERGE (c)
= c_clone_omp_udr (TREE_VEC_ELT (list, 2),
TREE_VEC_ELT (list, 0),
TREE_VEC_ELT (list, 1),
decl_placeholder ? decl_placeholder
: OMP_CLAUSE_DECL (c), placeholder);
OMP_CLAUSE_REDUCTION_MERGE (c)
= build3_loc (OMP_CLAUSE_LOCATION (c), BIND_EXPR,
void_type_node, NULL_TREE,
OMP_CLAUSE_REDUCTION_MERGE (c), NULL_TREE);
TREE_SIDE_EFFECTS (OMP_CLAUSE_REDUCTION_MERGE (c)) = 1;
if (TREE_VEC_LENGTH (list) == 6)
{
if (TREE_ADDRESSABLE (TREE_VEC_ELT (list, 3)))
c_mark_addressable (decl_placeholder ? decl_placeholder
: OMP_CLAUSE_DECL (c));
if (TREE_ADDRESSABLE (TREE_VEC_ELT (list, 4)))
c_mark_addressable (placeholder);
tree init = TREE_VEC_ELT (list, 5);
if (init == error_mark_node)
init = DECL_INITIAL (TREE_VEC_ELT (list, 3));
OMP_CLAUSE_REDUCTION_INIT (c)
= c_clone_omp_udr (init, TREE_VEC_ELT (list, 4),
TREE_VEC_ELT (list, 3),
decl_placeholder ? decl_placeholder
: OMP_CLAUSE_DECL (c), placeholder);
if (TREE_VEC_ELT (list, 5) == error_mark_node)
{
tree v = decl_placeholder ? decl_placeholder : t;
OMP_CLAUSE_REDUCTION_INIT (c)
= build2 (INIT_EXPR, TREE_TYPE (v), v,
OMP_CLAUSE_REDUCTION_INIT (c));
}
if (walk_tree (&OMP_CLAUSE_REDUCTION_INIT (c),
c_find_omp_placeholder_r,
placeholder, NULL))
OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c) = 1;
}
else
{
tree init;
tree v = decl_placeholder ? decl_placeholder : t;
if (AGGREGATE_TYPE_P (TREE_TYPE (v)))
init = build_constructor (TREE_TYPE (v), NULL);
else
init = fold_convert (TREE_TYPE (v), integer_zero_node);
OMP_CLAUSE_REDUCTION_INIT (c)
= build2 (INIT_EXPR, TREE_TYPE (v), v, init);
}
OMP_CLAUSE_REDUCTION_INIT (c)
= build3_loc (OMP_CLAUSE_LOCATION (c), BIND_EXPR,
void_type_node, NULL_TREE,
OMP_CLAUSE_REDUCTION_INIT (c), NULL_TREE);
TREE_SIDE_EFFECTS (OMP_CLAUSE_REDUCTION_INIT (c)) = 1;
}
if (TREE_CODE (t) == MEM_REF)
{
if (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t))) == NULL_TREE
|| TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t))))
!= INTEGER_CST)
{
sorry ("variable length element type in array "
"%<reduction%> clause");
remove = true;
break;
}
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == POINTER_PLUS_EXPR)
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == ADDR_EXPR)
t = TREE_OPERAND (t, 0);
}
goto check_dup_generic_t;
case OMP_CLAUSE_COPYPRIVATE:
copyprivate_seen = true;
if (nowait_clause)
{
error_at (OMP_CLAUSE_LOCATION (*nowait_clause),
"%<nowait%> clause must not be used together "
"with %<copyprivate%>");
*nowait_clause = OMP_CLAUSE_CHAIN (*nowait_clause);
nowait_clause = NULL;
}
goto check_dup_generic;
case OMP_CLAUSE_COPYIN:
t = OMP_CLAUSE_DECL (c);
if (!VAR_P (t) || !DECL_THREAD_LOCAL_P (t))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE must be %<threadprivate%> for %<copyin%>", t);
remove = true;
break;
}
goto check_dup_generic;
case OMP_CLAUSE_LINEAR:
if (ort != C_ORT_OMP_DECLARE_SIMD)
need_implicitly_determined = true;
t = OMP_CLAUSE_DECL (c);
if (ort != C_ORT_OMP_DECLARE_SIMD
&& OMP_CLAUSE_LINEAR_KIND (c) != OMP_CLAUSE_LINEAR_DEFAULT)
{
error_at (OMP_CLAUSE_LOCATION (c),
"modifier should not be specified in %<linear%> "
"clause on %<simd%> or %<for%> constructs");
OMP_CLAUSE_LINEAR_KIND (c) = OMP_CLAUSE_LINEAR_DEFAULT;
}
if (!INTEGRAL_TYPE_P (TREE_TYPE (t))
&& TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"linear clause applied to non-integral non-pointer "
"variable with type %qT", TREE_TYPE (t));
remove = true;
break;
}
if (TYPE_ATOMIC (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<_Atomic%> %qD in %<linear%> clause", t);
remove = true;
break;
}
if (ort == C_ORT_OMP_DECLARE_SIMD)
{
tree s = OMP_CLAUSE_LINEAR_STEP (c);
if (TREE_CODE (s) == PARM_DECL)
{
OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c) = 1;
/* map_head bitmap is used as uniform_head if
declare_simd. */
if (!bitmap_bit_p (&map_head, DECL_UID (s)))
linear_variable_step_check = true;
goto check_dup_generic;
}
if (TREE_CODE (s) != INTEGER_CST)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<linear%> clause step %qE is neither constant "
"nor a parameter", s);
remove = true;
break;
}
}
if (TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c))) == POINTER_TYPE)
{
tree s = OMP_CLAUSE_LINEAR_STEP (c);
s = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR,
OMP_CLAUSE_DECL (c), s);
s = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
sizetype, fold_convert (sizetype, s),
fold_convert
(sizetype, OMP_CLAUSE_DECL (c)));
if (s == error_mark_node)
s = size_one_node;
OMP_CLAUSE_LINEAR_STEP (c) = s;
}
else
OMP_CLAUSE_LINEAR_STEP (c)
= fold_convert (TREE_TYPE (t), OMP_CLAUSE_LINEAR_STEP (c));
goto check_dup_generic;
check_dup_generic:
t = OMP_CLAUSE_DECL (c);
check_dup_generic_t:
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if ((ort == C_ORT_ACC
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
|| (ort == C_ORT_OMP
&& (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR
|| (OMP_CLAUSE_CODE (c)
== OMP_CLAUSE_USE_DEVICE_ADDR))))
{
if (bitmap_bit_p (&oacc_reduction_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
ort == C_ORT_ACC
? "%qD appears more than once in reduction clauses"
: "%qD appears more than once in data clauses",
t);
remove = true;
}
else
bitmap_set_bit (&oacc_reduction_head, DECL_UID (t));
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t))
|| bitmap_bit_p (&lastprivate_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in data clauses", t);
remove = true;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
&& bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (ort == C_ORT_ACC)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data clauses", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears both in data and map clauses", t);
remove = true;
}
else
bitmap_set_bit (&generic_head, DECL_UID (t));
break;
case OMP_CLAUSE_FIRSTPRIVATE:
t = OMP_CLAUSE_DECL (c);
need_complete = true;
need_implicitly_determined = true;
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %<firstprivate%>", t);
remove = true;
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in data clauses", t);
remove = true;
}
else if (bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (ort == C_ORT_ACC)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data clauses", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears both in data and map clauses", t);
remove = true;
}
else
bitmap_set_bit (&firstprivate_head, DECL_UID (t));
break;
case OMP_CLAUSE_LASTPRIVATE:
t = OMP_CLAUSE_DECL (c);
need_complete = true;
need_implicitly_determined = true;
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %<lastprivate%>", t);
remove = true;
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&lastprivate_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in data clauses", t);
remove = true;
}
else
bitmap_set_bit (&lastprivate_head, DECL_UID (t));
break;
case OMP_CLAUSE_ALIGNED:
t = OMP_CLAUSE_DECL (c);
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in %<aligned%> clause", t);
remove = true;
}
else if (!POINTER_TYPE_P (TREE_TYPE (t))
&& TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE in %<aligned%> clause is neither a pointer nor "
"an array", t);
remove = true;
}
else if (TYPE_ATOMIC (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<_Atomic%> %qD in %<aligned%> clause", t);
remove = true;
break;
}
else if (bitmap_bit_p (&aligned_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in %<aligned%> clauses",
t);
remove = true;
}
else
bitmap_set_bit (&aligned_head, DECL_UID (t));
break;
case OMP_CLAUSE_NONTEMPORAL:
t = OMP_CLAUSE_DECL (c);
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in %<nontemporal%> clause", t);
remove = true;
}
else if (bitmap_bit_p (&oacc_reduction_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in %<nontemporal%> "
"clauses", t);
remove = true;
}
else
bitmap_set_bit (&oacc_reduction_head, DECL_UID (t));
break;
case OMP_CLAUSE_DEPEND:
t = OMP_CLAUSE_DECL (c);
if (t == NULL_TREE)
{
gcc_assert (OMP_CLAUSE_DEPEND_KIND (c)
== OMP_CLAUSE_DEPEND_SOURCE);
break;
}
if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
{
gcc_assert (TREE_CODE (t) == TREE_LIST);
for (; t; t = TREE_CHAIN (t))
{
tree decl = TREE_VALUE (t);
if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
{
tree offset = TREE_PURPOSE (t);
bool neg = wi::neg_p (wi::to_wide (offset));
offset = fold_unary (ABS_EXPR, TREE_TYPE (offset), offset);
tree t2 = pointer_int_sum (OMP_CLAUSE_LOCATION (c),
neg ? MINUS_EXPR : PLUS_EXPR,
decl, offset);
t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
sizetype,
fold_convert (sizetype, t2),
fold_convert (sizetype, decl));
if (t2 == error_mark_node)
{
remove = true;
break;
}
TREE_PURPOSE (t) = t2;
}
}
break;
}
if (TREE_CODE (t) == TREE_LIST
&& TREE_PURPOSE (t)
&& TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
{
if (TREE_PURPOSE (t) != last_iterators)
last_iterators_remove
= c_omp_finish_iterators (TREE_PURPOSE (t));
last_iterators = TREE_PURPOSE (t);
t = TREE_VALUE (t);
if (last_iterators_remove)
t = error_mark_node;
}
else
last_iterators = NULL_TREE;
if (TREE_CODE (t) == TREE_LIST)
{
if (handle_omp_array_sections (c, ort))
remove = true;
else if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_DEPOBJ)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<depend%> clause with %<depobj%> dependence "
"type on array section");
remove = true;
}
break;
}
if (t == error_mark_node)
remove = true;
else if (!lvalue_p (t))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not lvalue expression nor array section in "
"%<depend%> clause", t);
remove = true;
}
else if (TREE_CODE (t) == COMPONENT_REF
&& DECL_C_BIT_FIELD (TREE_OPERAND (t, 1)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"bit-field %qE in %qs clause", t, "depend");
remove = true;
}
else if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_DEPOBJ)
{
if (!c_omp_depend_t_p (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE does not have %<omp_depend_t%> type in "
"%<depend%> clause with %<depobj%> dependence "
"type", t);
remove = true;
}
}
else if (c_omp_depend_t_p (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE should not have %<omp_depend_t%> type in "
"%<depend%> clause with dependence type other than "
"%<depobj%>", t);
remove = true;
}
if (!remove)
{
tree addr = build_unary_op (OMP_CLAUSE_LOCATION (c), ADDR_EXPR,
t, false);
if (addr == error_mark_node)
remove = true;
else
{
t = build_indirect_ref (OMP_CLAUSE_LOCATION (c), addr,
RO_UNARY_STAR);
if (t == error_mark_node)
remove = true;
else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == TREE_LIST
&& TREE_PURPOSE (OMP_CLAUSE_DECL (c))
&& (TREE_CODE (TREE_PURPOSE (OMP_CLAUSE_DECL (c)))
== TREE_VEC))
TREE_VALUE (OMP_CLAUSE_DECL (c)) = t;
else
OMP_CLAUSE_DECL (c) = t;
}
}
break;
case OMP_CLAUSE_MAP:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE__CACHE_:
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST)
{
if (handle_omp_array_sections (c, ort))
remove = true;
else
{
t = OMP_CLAUSE_DECL (c);
if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"array section does not have mappable type "
"in %qs clause",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (TYPE_ATOMIC (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<_Atomic%> %qE in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
while (TREE_CODE (t) == ARRAY_REF)
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == COMPONENT_REF
&& TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
{
while (TREE_CODE (t) == COMPONENT_REF)
t = TREE_OPERAND (t, 0);
if (bitmap_bit_p (&map_field_head, DECL_UID (t)))
break;
if (bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in motion "
"clauses", t);
else if (ort == C_ORT_ACC)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data "
"clauses", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in map "
"clauses", t);
remove = true;
}
else
{
bitmap_set_bit (&map_head, DECL_UID (t));
bitmap_set_bit (&map_field_head, DECL_UID (t));
}
}
}
if (c_oacc_check_attachments (c))
remove = true;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH))
/* In this case, we have a single array element which is a
pointer, and we already set OMP_CLAUSE_SIZE in
handle_omp_array_sections above. For attach/detach clauses,
reset the OMP_CLAUSE_SIZE (representing a bias) to zero
here. */
OMP_CLAUSE_SIZE (c) = size_zero_node;
break;
}
if (t == error_mark_node)
{
remove = true;
break;
}
/* OpenACC attach / detach clauses must be pointers. */
if (c_oacc_check_attachments (c))
{
remove = true;
break;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH))
/* For attach/detach clauses, set OMP_CLAUSE_SIZE (representing a
bias) to zero here, so it is not set erroneously to the pointer
size later on in gimplify.c. */
OMP_CLAUSE_SIZE (c) = size_zero_node;
if (TREE_CODE (t) == COMPONENT_REF
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE__CACHE_)
{
if (DECL_BIT_FIELD (TREE_OPERAND (t, 1)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"bit-field %qE in %qs clause",
t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE does not have a mappable type in %qs clause",
t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (TYPE_ATOMIC (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<_Atomic%> %qE in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
while (TREE_CODE (t) == COMPONENT_REF)
{
if (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
== UNION_TYPE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is a member of a union", t);
remove = true;
break;
}
t = TREE_OPERAND (t, 0);
if (ort == C_ORT_ACC && TREE_CODE (t) == MEM_REF)
{
if (maybe_ne (mem_ref_offset (t), 0))
error_at (OMP_CLAUSE_LOCATION (c),
"cannot dereference %qE in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
else
t = TREE_OPERAND (t, 0);
}
}
if (remove)
break;
if (VAR_P (t) || TREE_CODE (t) == PARM_DECL)
{
if (bitmap_bit_p (&map_field_head, DECL_UID (t)))
break;
}
}
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (VAR_P (t) && DECL_THREAD_LOCAL_P (t))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD is threadprivate variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
|| (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_POINTER))
&& !c_mark_addressable (t))
remove = true;
else if (!(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_POINTER)
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FORCE_DEVICEPTR)))
&& t == OMP_CLAUSE_DECL (c)
&& !lang_hooks.types.omp_mappable_type (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD does not have a mappable type in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (TREE_TYPE (t) == error_mark_node)
remove = true;
else if (TYPE_ATOMIC (strip_array_types (TREE_TYPE (t))))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<_Atomic%> %qE in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
{
if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data clauses", t);
remove = true;
}
else if (bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (ort == C_ORT_ACC)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data clauses", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears both in data and map clauses", t);
remove = true;
}
else
bitmap_set_bit (&generic_head, DECL_UID (t));
}
else if (bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in motion clauses", t);
else if (ort == C_ORT_ACC)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data clauses", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in map clauses", t);
remove = true;
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t)))
{
if (ort == C_ORT_ACC)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data clauses", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears both in data and map clauses", t);
remove = true;
}
else
{
bitmap_set_bit (&map_head, DECL_UID (t));
if (t != OMP_CLAUSE_DECL (c)
&& TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPONENT_REF)
bitmap_set_bit (&map_field_head, DECL_UID (t));
}
break;
case OMP_CLAUSE_TO_DECLARE:
case OMP_CLAUSE_LINK:
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == FUNCTION_DECL
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE)
;
else if (!VAR_P (t))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE)
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is neither a variable nor a function name in "
"clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (DECL_THREAD_LOCAL_P (t))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD is threadprivate variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD does not have a mappable type in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
if (remove)
break;
if (bitmap_bit_p (&generic_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once on the same "
"%<declare target%> directive", t);
remove = true;
}
else
bitmap_set_bit (&generic_head, DECL_UID (t));
break;
case OMP_CLAUSE_UNIFORM:
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) != PARM_DECL)
{
if (DECL_P (t))
error_at (OMP_CLAUSE_LOCATION (c),
"%qD is not an argument in %<uniform%> clause", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not an argument in %<uniform%> clause", t);
remove = true;
break;
}
/* map_head bitmap is used as uniform_head if declare_simd. */
bitmap_set_bit (&map_head, DECL_UID (t));
goto check_dup_generic;
case OMP_CLAUSE_IS_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_PTR:
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE)
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR
&& ort == C_ORT_OMP)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qs variable is not a pointer",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qs variable is neither a pointer nor an array",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
}
goto check_dup_generic;
case OMP_CLAUSE_USE_DEVICE_ADDR:
t = OMP_CLAUSE_DECL (c);
if (VAR_P (t) || TREE_CODE (t) == PARM_DECL)
c_mark_addressable (t);
goto check_dup_generic;
case OMP_CLAUSE_NOWAIT:
if (copyprivate_seen)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<nowait%> clause must not be used together "
"with %<copyprivate%>");
remove = true;
break;
}
nowait_clause = pc;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_ORDER:
if (ordered_clause)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<order%> clause must not be used together "
"with %<ordered%>");
remove = true;
break;
}
else if (order_clause)
{
/* Silently remove duplicates. */
remove = true;
break;
}
order_clause = pc;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_PARALLEL:
case OMP_CLAUSE_FOR:
case OMP_CLAUSE_SECTIONS:
case OMP_CLAUSE_TASKGROUP:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_DEVICE_TYPE:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_HINT:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_BIND:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
case OMP_CLAUSE_TILE:
case OMP_CLAUSE_IF_PRESENT:
case OMP_CLAUSE_FINALIZE:
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_NOGROUP:
nogroup_seen = pc;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_SCHEDULE:
schedule_clause = c;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_ORDERED:
ordered_clause = c;
if (order_clause)
{
error_at (OMP_CLAUSE_LOCATION (*order_clause),
"%<order%> clause must not be used together "
"with %<ordered%>");
*order_clause = OMP_CLAUSE_CHAIN (*order_clause);
order_clause = NULL;
}
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_SAFELEN:
safelen = c;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_SIMDLEN:
simdlen = c;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_INBRANCH:
case OMP_CLAUSE_NOTINBRANCH:
if (branch_seen)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<inbranch%> clause is incompatible with "
"%<notinbranch%>");
remove = true;
break;
}
branch_seen = true;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_INCLUSIVE:
case OMP_CLAUSE_EXCLUSIVE:
need_complete = true;
need_implicitly_determined = true;
t = OMP_CLAUSE_DECL (c);
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
break;
default:
gcc_unreachable ();
}
if (!remove)
{
t = OMP_CLAUSE_DECL (c);
if (need_complete)
{
t = require_complete_type (OMP_CLAUSE_LOCATION (c), t);
if (t == error_mark_node)
remove = true;
}
if (need_implicitly_determined)
{
const char *share_name = NULL;
if (VAR_P (t) && DECL_THREAD_LOCAL_P (t))
share_name = "threadprivate";
else switch (c_omp_predetermined_sharing (t))
{
case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
break;
case OMP_CLAUSE_DEFAULT_SHARED:
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
&& c_omp_predefined_variable (t))
/* The __func__ variable and similar function-local
predefined variables may be listed in a shared or
firstprivate clause. */
break;
share_name = "shared";
break;
case OMP_CLAUSE_DEFAULT_PRIVATE:
share_name = "private";
break;
default:
gcc_unreachable ();
}
if (share_name)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is predetermined %qs for %qs",
t, share_name,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (TREE_READONLY (t)
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SHARED
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_FIRSTPRIVATE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<const%> qualified %qE may appear only in "
"%<shared%> or %<firstprivate%> clauses", t);
remove = true;
}
}
}
if (remove)
*pc = OMP_CLAUSE_CHAIN (c);
else
pc = &OMP_CLAUSE_CHAIN (c);
}
if (simdlen
&& safelen
&& tree_int_cst_lt (OMP_CLAUSE_SAFELEN_EXPR (safelen),
OMP_CLAUSE_SIMDLEN_EXPR (simdlen)))
{
error_at (OMP_CLAUSE_LOCATION (simdlen),
"%<simdlen%> clause value is bigger than "
"%<safelen%> clause value");
OMP_CLAUSE_SIMDLEN_EXPR (simdlen)
= OMP_CLAUSE_SAFELEN_EXPR (safelen);
}
if (ordered_clause
&& schedule_clause
&& (OMP_CLAUSE_SCHEDULE_KIND (schedule_clause)
& OMP_CLAUSE_SCHEDULE_NONMONOTONIC))
{
error_at (OMP_CLAUSE_LOCATION (schedule_clause),
"%<nonmonotonic%> schedule modifier specified together "
"with %<ordered%> clause");
OMP_CLAUSE_SCHEDULE_KIND (schedule_clause)
= (enum omp_clause_schedule_kind)
(OMP_CLAUSE_SCHEDULE_KIND (schedule_clause)
& ~OMP_CLAUSE_SCHEDULE_NONMONOTONIC);
}
if (reduction_seen < 0 && ordered_clause)
{
error_at (OMP_CLAUSE_LOCATION (ordered_clause),
"%qs clause specified together with %<inscan%> "
"%<reduction%> clause", "ordered");
reduction_seen = -2;
}
if (reduction_seen < 0 && schedule_clause)
{
error_at (OMP_CLAUSE_LOCATION (schedule_clause),
"%qs clause specified together with %<inscan%> "
"%<reduction%> clause", "schedule");
reduction_seen = -2;
}
if (linear_variable_step_check || reduction_seen == -2)
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)
&& !bitmap_bit_p (&map_head,
DECL_UID (OMP_CLAUSE_LINEAR_STEP (c))))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<linear%> clause step is a parameter %qD not "
"specified in %<uniform%> clause",
OMP_CLAUSE_LINEAR_STEP (c));
remove = true;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
OMP_CLAUSE_REDUCTION_INSCAN (c) = 0;
if (remove)
*pc = OMP_CLAUSE_CHAIN (c);
else
pc = &OMP_CLAUSE_CHAIN (c);
}
if (nogroup_seen && reduction_seen)
{
error_at (OMP_CLAUSE_LOCATION (*nogroup_seen),
"%<nogroup%> clause must not be used together with "
"%<reduction%> clause");
*nogroup_seen = OMP_CLAUSE_CHAIN (*nogroup_seen);
}
bitmap_obstack_release (NULL);
return clauses;
}
/* Return code to initialize DST with a copy constructor from SRC.
   C doesn't have copy constructors nor assignment operators, only for
   _Atomic vars we need to perform __atomic_load from src into a temporary
   followed by __atomic_store of the temporary to dst.  */

tree
c_omp_clause_copy_ctor (tree clause, tree dst, tree src)
{
  /* The common case: neither side is _Atomic, so an ordinary
     assignment expression suffices.  */
  if (!really_atomic_lvalue (dst) && !really_atomic_lvalue (src))
    return build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);

  location_t loc = OMP_CLAUSE_LOCATION (clause);
  tree dst_type = TREE_TYPE (dst);
  /* The scratch variable holding the loaded value must itself be
     non-atomic (and unqualified).  */
  tree plain_type = build_qualified_type (dst_type, TYPE_UNQUALIFIED);
  tree scratch = create_tmp_var (plain_type);
  tree scratch_addr = build_fold_addr_expr (scratch);
  TREE_ADDRESSABLE (scratch) = 1;
  TREE_NO_WARNING (scratch) = 1;
  tree addr_of_src = build_fold_addr_expr (src);
  tree addr_of_dst = build_fold_addr_expr (dst);
  tree order = build_int_cst (integer_type_node, MEMMODEL_SEQ_CST);

  /* Expansion of a generic atomic load may require an additional
     element, so allocate enough to prevent a resize.  */
  vec<tree, va_gc> *args;
  vec_alloc (args, 4);

  /* Build __atomic_load (&src, &tmp, SEQ_CST);  */
  tree load_fn = builtin_decl_explicit (BUILT_IN_ATOMIC_LOAD);
  args->quick_push (addr_of_src);
  args->quick_push (scratch_addr);
  args->quick_push (order);
  tree load_call = c_build_function_call_vec (loc, vNULL, load_fn, args, NULL);

  /* Build __atomic_store (&dst, &tmp, SEQ_CST);  */
  vec_alloc (args, 4);
  tree store_fn = builtin_decl_explicit (BUILT_IN_ATOMIC_STORE);
  args->quick_push (addr_of_dst);
  args->quick_push (scratch_addr);
  args->quick_push (order);
  tree store_call
    = c_build_function_call_vec (loc, vNULL, store_fn, args, NULL);

  /* Sequence the load before the store.  */
  return build2 (COMPOUND_EXPR, void_type_node, load_call, store_call);
}
/* Create a transaction node.  */

tree
c_finish_transaction (location_t loc, tree block, int flags)
{
  tree stmt = build_stmt (loc, TRANSACTION_EXPR, block);
  /* Copy the relevant attribute bits from FLAGS onto the tree flags;
     each flag is a single bit, so a boolean expression is equivalent
     to the conditional assignment of 1.  */
  TRANSACTION_EXPR_OUTER (stmt) = (flags & TM_STMT_ATTR_OUTER) != 0;
  TRANSACTION_EXPR_RELAXED (stmt) = (flags & TM_STMT_ATTR_RELAXED) != 0;
  return add_stmt (stmt);
}
/* Make a variant type in the proper way for C/C++, propagating qualifiers
   down to the element type of an array.  If ORIG_QUAL_TYPE is not
   NULL, then it should be used as the qualified type
   ORIG_QUAL_INDIRECT levels down in array type derivation (to
   preserve information about the typedef name from which an array
   type was derived).  */

tree
c_build_qualified_type (tree type, int type_quals, tree orig_qual_type,
			size_t orig_qual_indirect)
{
  if (type == error_mark_node)
    return type;

  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      tree t;
      /* In C, qualifiers on an array type live on the element type, so
	 recurse to qualify the element first.  ORIG_QUAL_INDIRECT is
	 decremented so that ORIG_QUAL_TYPE is substituted at the right
	 derivation depth.  */
      tree element_type = c_build_qualified_type (TREE_TYPE (type),
						  type_quals, orig_qual_type,
						  orig_qual_indirect - 1);

      /* See if we already have an identically qualified type.  */
      if (orig_qual_type && orig_qual_indirect == 0)
	t = orig_qual_type;
      else
	for (t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
	  {
	    /* A matching variant must agree on element qualifiers, name,
	       context and attributes.  */
	    if (TYPE_QUALS (strip_array_types (t)) == type_quals
		&& TYPE_NAME (t) == TYPE_NAME (type)
		&& TYPE_CONTEXT (t) == TYPE_CONTEXT (type)
		&& attribute_list_equal (TYPE_ATTRIBUTES (t),
					 TYPE_ATTRIBUTES (type)))
	      break;
	  }
      if (!t)
	{
	  /* No existing variant matched: build a fresh one with the
	     qualified element type.  */
	  tree domain = TYPE_DOMAIN (type);
	  t = build_variant_type_copy (type);
	  TREE_TYPE (t) = element_type;

	  if (TYPE_STRUCTURAL_EQUALITY_P (element_type)
	      || (domain && TYPE_STRUCTURAL_EQUALITY_P (domain)))
	    /* Structural equality of element or domain propagates to
	       the array type itself.  */
	    SET_TYPE_STRUCTURAL_EQUALITY (t);
	  else if (TYPE_CANONICAL (element_type) != element_type
		   || (domain && TYPE_CANONICAL (domain) != domain))
	    {
	      /* Derive the canonical type from the canonical element and
		 domain, preserving reverse storage order, then qualify
		 it the same way (default args: no ORIG_QUAL_TYPE).  */
	      tree unqualified_canon
		= build_array_type (TYPE_CANONICAL (element_type),
				    domain? TYPE_CANONICAL (domain)
					  : NULL_TREE);
	      if (TYPE_REVERSE_STORAGE_ORDER (type))
		{
		  unqualified_canon
		    = build_distinct_type_copy (unqualified_canon);
		  TYPE_REVERSE_STORAGE_ORDER (unqualified_canon) = 1;
		}
	      TYPE_CANONICAL (t)
		= c_build_qualified_type (unqualified_canon, type_quals);
	    }
	  else
	    TYPE_CANONICAL (t) = t;
	}
      return t;
    }

  /* A restrict-qualified pointer type must be a pointer to object or
     incomplete type.  Note that the use of POINTER_TYPE_P also allows
     REFERENCE_TYPEs, which is appropriate for C++.  */
  if ((type_quals & TYPE_QUAL_RESTRICT)
      && (!POINTER_TYPE_P (type)
	  || !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (type))))
    {
      error ("invalid use of %<restrict%>");
      type_quals &= ~TYPE_QUAL_RESTRICT;
    }

  tree var_type = (orig_qual_type && orig_qual_indirect == 0
		   ? orig_qual_type
		   : build_qualified_type (type, type_quals));
  /* A variant type does not inherit the list of incomplete vars from the
     type main variant.  */
  if ((RECORD_OR_UNION_TYPE_P (var_type)
       || TREE_CODE (var_type) == ENUMERAL_TYPE)
      && TYPE_MAIN_VARIANT (var_type) != var_type)
    C_TYPE_INCOMPLETE_VARS (var_type) = 0;
  return var_type;
}
/* Build a VA_ARG_EXPR for the C parser.  */

tree
c_build_va_arg (location_t loc1, tree expr, location_t loc2, tree type)
{
  /* Guard-clause style: each invalid case returns immediately, so the
     diagnostics fire in the same order as before.  */
  if (error_operand_p (type))
    return error_mark_node;

  /* VA_ARG_EXPR cannot be used for a scalar va_list with reverse storage
     order because it takes the address of the expression.  */
  if (handled_component_p (expr)
      && reverse_storage_order_for_component_p (expr))
    {
      error_at (loc1, "cannot use %<va_arg%> with reverse storage order");
      return error_mark_node;
    }

  if (!COMPLETE_TYPE_P (type))
    {
      error_at (loc2, "second argument to %<va_arg%> is of incomplete "
		"type %qT", type);
      return error_mark_node;
    }

  /* Enum types promote in C but not in C++; warn under -Wc++-compat.  */
  if (warn_cxx_compat && TREE_CODE (type) == ENUMERAL_TYPE)
    warning_at (loc2, OPT_Wc___compat,
		"C++ requires promoted type, not enum type, in %<va_arg%>");

  return build_va_arg (loc2, expr, type);
}
/* Return truthvalue of whether T1 is the same tree structure as T2.
Return 1 if they are the same. Return false if they are different. */
bool
c_tree_equal (tree t1, tree t2)
{
  enum tree_code code1, code2;

  /* Identical nodes are trivially equal; a NULL on one side only is not.  */
  if (t1 == t2)
    return true;
  if (!t1 || !t2)
    return false;

  /* Strip conversions and NON_LVALUE_EXPR wrappers from both operands
     before comparing the underlying nodes.  */
  for (code1 = TREE_CODE (t1);
       CONVERT_EXPR_CODE_P (code1)
       || code1 == NON_LVALUE_EXPR;
       code1 = TREE_CODE (t1))
    t1 = TREE_OPERAND (t1, 0);
  for (code2 = TREE_CODE (t2);
       CONVERT_EXPR_CODE_P (code2)
       || code2 == NON_LVALUE_EXPR;
       code2 = TREE_CODE (t2))
    t2 = TREE_OPERAND (t2, 0);

  /* They might have become equal now.  */
  if (t1 == t2)
    return true;

  if (code1 != code2)
    return false;

  /* First dispatch on codes with node-specific equality rules.  */
  switch (code1)
    {
    case INTEGER_CST:
      return wi::to_wide (t1) == wi::to_wide (t2);

    case REAL_CST:
      return real_equal (&TREE_REAL_CST (t1), &TREE_REAL_CST (t2));

    case STRING_CST:
      return TREE_STRING_LENGTH (t1) == TREE_STRING_LENGTH (t2)
	&& !memcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2),
		    TREE_STRING_LENGTH (t1));

    case FIXED_CST:
      return FIXED_VALUES_IDENTICAL (TREE_FIXED_CST (t1),
				     TREE_FIXED_CST (t2));

    case COMPLEX_CST:
      return c_tree_equal (TREE_REALPART (t1), TREE_REALPART (t2))
	     && c_tree_equal (TREE_IMAGPART (t1), TREE_IMAGPART (t2));

    case VECTOR_CST:
      return operand_equal_p (t1, t2, OEP_ONLY_CONST);

    case CONSTRUCTOR:
      /* We need to do this when determining whether or not two
	 non-type pointer to member function template arguments
	 are the same.  */
      if (!comptypes (TREE_TYPE (t1), TREE_TYPE (t2))
	  || CONSTRUCTOR_NELTS (t1) != CONSTRUCTOR_NELTS (t2))
	return false;
      {
	tree field, value;
	unsigned int i;
	/* Compare corresponding index/value pairs elementwise; the
	   element counts are already known to match.  */
	FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (t1), i, field, value)
	  {
	    constructor_elt *elt2 = CONSTRUCTOR_ELT (t2, i);
	    if (!c_tree_equal (field, elt2->index)
		|| !c_tree_equal (value, elt2->value))
	      return false;
	  }
      }
      return true;

    case TREE_LIST:
      /* Purpose, value and the rest of the chain must all match.  */
      if (!c_tree_equal (TREE_PURPOSE (t1), TREE_PURPOSE (t2)))
	return false;
      if (!c_tree_equal (TREE_VALUE (t1), TREE_VALUE (t2)))
	return false;
      return c_tree_equal (TREE_CHAIN (t1), TREE_CHAIN (t2));

    case SAVE_EXPR:
      return c_tree_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));

    case CALL_EXPR:
      {
	tree arg1, arg2;
	call_expr_arg_iterator iter1, iter2;
	/* The callee and every argument must match; differing argument
	   counts are caught by one of arg1/arg2 running out first.  */
	if (!c_tree_equal (CALL_EXPR_FN (t1), CALL_EXPR_FN (t2)))
	  return false;
	for (arg1 = first_call_expr_arg (t1, &iter1),
	       arg2 = first_call_expr_arg (t2, &iter2);
	     arg1 && arg2;
	     arg1 = next_call_expr_arg (&iter1),
	       arg2 = next_call_expr_arg (&iter2))
	  if (!c_tree_equal (arg1, arg2))
	    return false;
	if (arg1 || arg2)
	  return false;
	return true;
      }

    case TARGET_EXPR:
      {
	tree o1 = TREE_OPERAND (t1, 0);
	tree o2 = TREE_OPERAND (t2, 0);

	/* Special case: if either target is an unallocated VAR_DECL,
	   it means that it's going to be unified with whatever the
	   TARGET_EXPR is really supposed to initialize, so treat it
	   as being equivalent to anything.  */
	if (VAR_P (o1) && DECL_NAME (o1) == NULL_TREE
	    && !DECL_RTL_SET_P (o1))
	  /*Nop*/;
	else if (VAR_P (o2) && DECL_NAME (o2) == NULL_TREE
		 && !DECL_RTL_SET_P (o2))
	  /*Nop*/;
	else if (!c_tree_equal (o1, o2))
	  return false;

	return c_tree_equal (TREE_OPERAND (t1, 1), TREE_OPERAND (t2, 1));
      }

    case COMPONENT_REF:
      /* Operand 1 is the FIELD_DECL; decls are compared by identity
	 (see the decl cases below), so pointer comparison suffices.  */
      if (TREE_OPERAND (t1, 1) != TREE_OPERAND (t2, 1))
	return false;
      return c_tree_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));

    case PARM_DECL:
    case VAR_DECL:
    case CONST_DECL:
    case FIELD_DECL:
    case FUNCTION_DECL:
    case IDENTIFIER_NODE:
    case SSA_NAME:
      /* Decls, identifiers and SSA names compare by identity only, and
	 pointer equality was already checked above.  */
      return false;

    case TREE_VEC:
      {
	unsigned ix;
	if (TREE_VEC_LENGTH (t1) != TREE_VEC_LENGTH (t2))
	  return false;
	for (ix = TREE_VEC_LENGTH (t1); ix--;)
	  if (!c_tree_equal (TREE_VEC_ELT (t1, ix),
			     TREE_VEC_ELT (t2, ix)))
	    return false;
	return true;
      }

    default:
      break;
    }

  /* Fall back to a generic operand-by-operand comparison keyed off the
     tree code class.  */
  switch (TREE_CODE_CLASS (code1))
    {
    case tcc_unary:
    case tcc_binary:
    case tcc_comparison:
    case tcc_expression:
    case tcc_vl_exp:
    case tcc_reference:
    case tcc_statement:
      {
	int i, n = TREE_OPERAND_LENGTH (t1);

	switch (code1)
	  {
	  case PREINCREMENT_EXPR:
	  case PREDECREMENT_EXPR:
	  case POSTINCREMENT_EXPR:
	  case POSTDECREMENT_EXPR:
	    /* Only the first operand is significant for these.  */
	    n = 1;
	    break;

	  case ARRAY_REF:
	    /* Only the base and index operands are significant.  */
	    n = 2;
	    break;

	  default:
	    break;
	  }

	/* Variable-length expressions must have matching counts.  */
	if (TREE_CODE_CLASS (code1) == tcc_vl_exp
	    && n != TREE_OPERAND_LENGTH (t2))
	  return false;

	for (i = 0; i < n; ++i)
	  if (!c_tree_equal (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i)))
	    return false;

	return true;
      }

    case tcc_type:
      return comptypes (t1, t2);
    default:
      gcc_unreachable ();
    }
  /* We can get here with --disable-checking.  */
  return false;
}
/* Returns true when the function declaration FNDECL is implicit,
   introduced as a result of a call to an otherwise undeclared
   function, and false otherwise.  C_DECL_IMPLICIT reads
   DECL_LANG_FLAG_2 (see c-tree.h).  */
bool
c_decl_implicit (const_tree fndecl)
{
  return C_DECL_IMPLICIT (fndecl);
}
|
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987-2017 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-family/c-common.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
declarations whose type would be completed by completing that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* Record whether a type or decl was written with nonconstant size.
Note that TYPE_SIZE may have simplified to a constant. */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a type is defined inside a struct or union type.
This is used for -Wc++-compat. */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
/* Record whether an "incomplete type" error was given for the type. */
#define C_TYPE_ERROR_REPORTED(TYPE) TYPE_LANG_FLAG_3 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
return type. */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For a PARM_DECL, nonzero if it was declared as an array. */
#define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
been declared. */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
built-in prototype and does not have a non-built-in prototype. */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register. This is strictly a
front-end flag, whereas DECL_REGISTER is used for code generation;
they may differ for structures with volatile fields. */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
unevaluated operand of sizeof / typeof / alignof. This is only
used for functions declared static but not defined, though outside
sizeof and typeof it is set for other function decls as well. */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
#pragma omp threadprivate. */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
N.B. Could be simplified if all built-in decls had complete prototypes
(but this is presently difficult because some of them need FILE*). */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (!prototype_p (TREE_TYPE (EXP)) \
&& !DECL_BUILT_IN (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
without prototypes. */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For a CONSTRUCTOR, whether some initializer contains a
subexpression meaning it is not a constant expression. */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
/* Record parser information about an expression that is irrelevant
for code generation alongside a tree representing its value. */
struct c_expr
{
/* The value of the expression. */
tree value;
/* Record the original unary/binary operator of an expression, which may
have been changed by fold, STRING_CST for unparenthesized string
constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
(even if parenthesized), for subexpressions, and for non-constant
initializers, or ERROR_MARK for other expressions (including
parenthesized expressions). */
enum tree_code original_code;
/* If not NULL, the original type of an expression. This will
differ from the type of the value field for an enum constant.
The type of an enum constant is a plain integer type, but this
field will be the enum type. */
tree original_type;
/* The source range of this expression. This is redundant
for node values that have locations, but not all node kinds
have locations (e.g. constants, and references to params, locals,
etc), so we stash a copy here. */
source_range src_range;
/* Access to the first and last locations within the source spelling
of this expression. */
location_t get_start () const { return src_range.m_start; }
location_t get_finish () const { return src_range.m_finish; }
/* Set the value to error_mark_node whilst ensuring that src_range
is initialized. */
void set_error ()
{
value = error_mark_node;
src_range.m_start = UNKNOWN_LOCATION;
src_range.m_finish = UNKNOWN_LOCATION;
}
};
/* Type alias for struct c_expr. This allows to use the structure
inside the VEC types. */
typedef struct c_expr c_expr_t;
/* A kind of type specifier. Note that this information is currently
only used to distinguish tag definitions, tag references and typeof
uses. */
enum c_typespec_kind {
/* No typespec. This appears only in struct c_declspec. */
ctsk_none,
/* A reserved keyword type specifier. */
ctsk_resword,
/* A reference to a tag, previously declared, such as "struct foo".
This includes where the previous declaration was as a different
kind of tag, in which case this is only valid if shadowing that
tag in an inner scope. */
ctsk_tagref,
/* A reference to a tag, not previously declared in a visible
scope. */
ctsk_tagfirstref,
/* A definition of a tag such as "struct foo { int a; }". */
ctsk_tagdef,
/* A typedef name. */
ctsk_typedef,
/* An ObjC-specific kind of type specifier. */
ctsk_objc,
/* A typeof specifier, or _Atomic ( type-name ). */
ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
passed to declspecs_add_type only. */
struct c_typespec {
/* What kind of type specifier this is. */
enum c_typespec_kind kind;
/* Whether the expression has operands suitable for use in constant
expressions. */
bool expr_const_operands;
/* The specifier itself. */
tree spec;
/* An expression to be evaluated before the type specifier, in the
case of typeof specifiers, or NULL otherwise or if no such
expression is required for a particular typeof specifier. In
particular, when typeof is applied to an expression of variably
modified type, that expression must be evaluated in order to
determine array sizes that form part of the type, but the
expression itself (as opposed to the array sizes) forms no part
of the type and so needs to be recorded separately. */
tree expr;
};
/* A storage class specifier. */
enum c_storage_class {
csc_none,
csc_auto,
csc_extern,
csc_register,
csc_static,
csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
"double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
or none of these. */
enum c_typespec_keyword {
cts_none,
cts_void,
cts_bool,
cts_char,
cts_int,
cts_float,
cts_int_n,
cts_double,
cts_dfloat32,
cts_dfloat64,
cts_dfloat128,
cts_floatn_nx,
cts_fract,
cts_accum,
cts_auto_type
};
/* This enum lists all the possible declarator specifiers, storage
class or attribute that a user can write. There is at least one
enumerator per possible declarator specifier in the struct
c_declspecs below.
It is used to index the array of declspec locations in struct
c_declspecs. */
enum c_declspec_word {
cdw_typespec /* A catch-all for a typespec. */,
cdw_storage_class /* A catch-all for a storage class */,
cdw_attributes,
cdw_typedef,
cdw_explicit_signed,
cdw_deprecated,
cdw_default_int,
cdw_long,
cdw_long_long,
cdw_short,
cdw_signed,
cdw_unsigned,
cdw_complex,
cdw_inline,
cdw_noreturn,
cdw_thread,
cdw_const,
cdw_volatile,
cdw_restrict,
cdw_atomic,
cdw_saturating,
cdw_alignas,
cdw_address_space,
cdw_gimple,
cdw_rtl,
cdw_number_of_elements /* This one must always be the last
enumerator. */
};
/* A sequence of declaration specifiers in C.  When a new declaration
   specifier is added, please update the enum c_declspec_word above
   accordingly.  */
struct c_declspecs {
  /* The location of each kind of declaration specifier seen, indexed
     by enum c_declspec_word.  */
  source_location locations[cdw_number_of_elements];
  /* The type specified, if a single type specifier such as a struct,
     union or enum specifier, typedef name or typeof specifies the
     whole type, or NULL_TREE if none or a keyword such as "void" or
     "char" is used.  Does not include qualifiers.  */
  tree type;
  /* Any expression to be evaluated before the type, from a typeof
     specifier.  */
  tree expr;
  /* The attributes from a typedef decl.  */
  tree decl_attr;
  /* When parsing, the attributes.  Outside the parser, this will be
     NULL; attributes (possibly from multiple lists) will be passed
     separately.  */
  tree attrs;
  /* The pass to start compiling a __GIMPLE or __RTL function with.  */
  char *gimple_or_rtl_pass;
  /* The base-2 log of the greatest alignment required by an _Alignas
     specifier, in bytes, or -1 if no such specifiers with nonzero
     alignment.  */
  int align_log;
  /* For the __intN declspec, this stores the index into the int_n_* arrays.  */
  int int_n_idx;
  /* For the _FloatN and _FloatNx declspec, this stores the index into
     the floatn_nx_types array.  */
  int floatn_nx_idx;
  /* The storage class specifier, or csc_none if none.  */
  enum c_storage_class storage_class;
  /* Any type specifier keyword used such as "int", not reflecting
     modifiers such as "short", or cts_none if none.  */
  ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8;
  /* The kind of type specifier if one has been seen, ctsk_none
     otherwise.  */
  ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3;
  /* Whether any expressions in typeof specifiers may appear in
     constant expressions.  */
  BOOL_BITFIELD expr_const_operands : 1;
  /* Whether any declaration specifiers have been seen at all.  */
  BOOL_BITFIELD declspecs_seen_p : 1;
  /* Whether something other than a storage class specifier or
     attribute has been seen.  This is used to warn for the
     obsolescent usage of storage class specifiers other than at the
     start of the list.  (Doing this properly would require function
     specifiers to be handled separately from storage class
     specifiers.)  */
  BOOL_BITFIELD non_sc_seen_p : 1;
  /* Whether the type is specified by a typedef or typeof name.  */
  BOOL_BITFIELD typedef_p : 1;
  /* Whether the type is explicitly "signed" or specified by a typedef
     whose type is explicitly "signed".  */
  BOOL_BITFIELD explicit_signed_p : 1;
  /* Whether the specifiers include a deprecated typedef.  */
  BOOL_BITFIELD deprecated_p : 1;
  /* Whether the type defaulted to "int" because there were no type
     specifiers.  */
  BOOL_BITFIELD default_int_p : 1;
  /* Whether "long" was specified.  */
  BOOL_BITFIELD long_p : 1;
  /* Whether "long" was specified more than once.  */
  BOOL_BITFIELD long_long_p : 1;
  /* Whether "short" was specified.  */
  BOOL_BITFIELD short_p : 1;
  /* Whether "signed" was specified.  */
  BOOL_BITFIELD signed_p : 1;
  /* Whether "unsigned" was specified.  */
  BOOL_BITFIELD unsigned_p : 1;
  /* Whether "complex" was specified.  */
  BOOL_BITFIELD complex_p : 1;
  /* Whether "inline" was specified.  */
  BOOL_BITFIELD inline_p : 1;
  /* Whether "_Noreturn" was specified.  */
  BOOL_BITFIELD noreturn_p : 1;
  /* Whether "__thread" or "_Thread_local" was specified.  */
  BOOL_BITFIELD thread_p : 1;
  /* Whether "__thread" rather than "_Thread_local" was specified.  */
  BOOL_BITFIELD thread_gnu_p : 1;
  /* Whether "const" was specified.  */
  BOOL_BITFIELD const_p : 1;
  /* Whether "volatile" was specified.  */
  BOOL_BITFIELD volatile_p : 1;
  /* Whether "restrict" was specified.  */
  BOOL_BITFIELD restrict_p : 1;
  /* Whether "_Atomic" was specified.  */
  BOOL_BITFIELD atomic_p : 1;
  /* Whether "_Sat" was specified.  */
  BOOL_BITFIELD saturating_p : 1;
  /* Whether any alignment specifier (even with zero alignment) was
     specified.  */
  BOOL_BITFIELD alignas_p : 1;
  /* Whether any __GIMPLE specifier was specified.  */
  BOOL_BITFIELD gimple_p : 1;
  /* Whether any __RTL specifier was specified.  */
  BOOL_BITFIELD rtl_p : 1;
  /* The address space that the declaration belongs to.  */
  addr_space_t address_space;
};
/* The various kinds of declarators in C. */
enum c_declarator_kind {
/* An identifier. */
cdk_id,
/* A function. */
cdk_function,
/* An array. */
cdk_array,
/* A pointer. */
cdk_pointer,
/* Parenthesized declarator with nested attributes. */
cdk_attrs
};
struct c_arg_tag {
/* The argument name. */
tree id;
/* The type of the argument. */
tree type;
};
/* Information about the parameters in a function declarator. */
struct c_arg_info {
/* A list of parameter decls. */
tree parms;
/* A list of structure, union and enum tags defined. */
vec<c_arg_tag, va_gc> *tags;
/* A list of argument types to go in the FUNCTION_TYPE. */
tree types;
/* A list of non-parameter decls (notably enumeration constants)
defined with the parameters. */
tree others;
/* A compound expression of VLA sizes from the parameters, or NULL.
In a function definition, these are used to ensure that
side-effects in sizes of arrays converted to pointers (such as a
parameter int i[n++]) take place; otherwise, they are
ignored. */
tree pending_sizes;
/* True when these arguments had [*]. */
BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator. */
struct c_declarator {
/* The kind of declarator. */
enum c_declarator_kind kind;
location_t id_loc; /* Currently only set for cdk_id, cdk_array. */
/* Except for cdk_id, the contained declarator. For cdk_id, NULL. */
struct c_declarator *declarator;
union {
/* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
declarator. */
tree id;
/* For functions. */
struct c_arg_info *arg_info;
/* For arrays. */
struct {
/* The array dimension, or NULL for [] and [*]. */
tree dimen;
/* The qualifiers inside []. */
int quals;
/* The attributes (currently ignored) inside []. */
tree attrs;
/* Whether [static] was used. */
BOOL_BITFIELD static_p : 1;
/* Whether [*] was used. */
BOOL_BITFIELD vla_unspec_p : 1;
} array;
/* For pointers, the qualifiers on the pointer type. */
int pointer_quals;
/* For attributes. */
tree attrs;
} u;
};
/* A type name. */
struct c_type_name {
/* The declaration specifiers. */
struct c_declspecs *specs;
/* The declarator. */
struct c_declarator *declarator;
};
/* A parameter. */
struct c_parm {
/* The declaration specifiers, minus any prefix attributes. */
struct c_declspecs *specs;
/* The attributes. */
tree attrs;
/* The declarator. */
struct c_declarator *declarator;
};
/* Used when parsing an enum. Initialized by start_enum. */
struct c_enum_contents
{
/* While defining an enum type, this is 1 plus the last enumerator
constant value. */
tree enum_next_value;
/* Nonzero means that there was overflow computing enum_next_value. */
int enum_overflow;
};
/* A type of reference to a static identifier in an inline
function. */
enum c_inline_static_type {
/* Identifier with internal linkage used in function that may be an
inline definition (i.e., file-scope static). */
csi_internal,
/* Modifiable object with static storage duration defined in
function that may be an inline definition (i.e., local
static). */
csi_modifiable
};
/* in c-parser.c */
extern void c_parse_init (void);
extern bool c_keyword_starts_typename (enum rid keyword);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
struct c_spot_bindings;
struct c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern bool global_bindings_p (void);
extern void push_scope (void);
extern tree pop_scope (void);
extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
extern void record_inline_static (location_t, tree, tree,
enum c_inline_static_type);
extern void c_init_decl_processing (void);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (location_t, tree,
struct c_declspecs *,
bool, bool);
extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
tree, tree);
extern tree check_for_loop_decls (location_t, bool);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (location_t, tree);
extern tree lookup_label_for_goto (location_t, tree);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern struct c_spot_bindings *c_get_switch_bindings (void);
extern void c_release_switch_bindings (struct c_spot_bindings *);
extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
location_t, location_t);
extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (location_t, tree, tree, tree,
struct c_struct_parse_info *);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
struct c_declspecs *, tree, tree *);
extern tree groktypename (struct c_type_name *, tree *, bool *);
extern tree grokparm (const struct c_parm *, tree *);
extern tree implicitly_declare (location_t, tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (void);
extern void c_pop_function_context (void);
extern void push_parm_decl (const struct c_parm *, tree *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *);
extern tree c_builtin_function (tree);
extern tree c_builtin_function_ext_scope (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (location_t, struct c_enum_contents *, tree);
extern int start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
struct c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern void temp_store_parm_decls (tree, tree);
extern void temp_pop_parm_decls (void);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (location_t,
struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_addrspace (source_location,
struct c_declspecs *,
addr_space_t);
extern struct c_declspecs *declspecs_add_alignas (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern tree c_last_sizeof_arg;
extern struct c_switch *c_switch_stack;
extern tree c_objc_common_truthvalue_conversion (location_t, tree);
extern tree require_complete_type (location_t, tree);
extern int same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern int comptypes_check_different_types (tree, tree, bool *);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree, bool = false);
extern void c_incomplete_type_error (location_t, const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (location_t,
struct c_expr);
extern struct c_expr default_function_array_read_conversion (location_t,
struct c_expr);
extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr,
bool, bool);
extern void mark_exp_read (tree);
extern tree composite_type (tree, tree);
extern tree build_component_ref (location_t, tree, tree, location_t);
extern tree build_array_ref (location_t, tree, tree);
extern tree build_external_ref (location_t, tree, int, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
tree, tree);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);
extern void store_init_value (location_t, tree, tree, tree);
extern void maybe_warn_string_init (location_t, tree, struct c_expr);
extern void start_init (tree, tree, int, rich_location *);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void finish_implicit_inits (location_t, struct obstack *);
extern void push_init_level (location_t, int, struct obstack *);
extern struct c_expr pop_init_level (location_t, int, struct obstack *,
location_t);
extern void set_init_index (location_t, tree, tree, struct obstack *);
extern void set_init_label (location_t, tree, location_t, struct obstack *);
extern void process_init_element (location_t, struct c_expr, bool,
struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool);
extern void check_compound_literal_type (location_t, struct c_type_name *);
extern tree c_start_case (location_t, location_t, tree, bool);
extern void c_finish_case (tree, tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool);
extern tree build_asm_stmt (tree, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree);
extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree *, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);
extern tree c_finish_omp_construct (location_t, enum tree_code, tree, tree);
extern tree c_finish_oacc_data (location_t, tree, tree);
extern tree c_finish_oacc_host_data (location_t, tree, tree);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern void c_finish_omp_cancel (location_t, tree);
extern void c_finish_omp_cancellation_point (location_t, tree);
extern tree c_finish_omp_clauses (tree, enum c_omp_region_type);
extern tree c_build_va_arg (location_t, tree, location_t, tree);
extern tree c_finish_transaction (location_t, tree, int);
extern bool c_tree_equal (tree, tree);
extern tree c_build_function_call_vec (location_t, vec<location_t>, tree,
vec<tree, va_gc> *, vec<tree, va_gc> *);
extern tree c_omp_clause_copy_ctor (tree, tree, tree);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* In c-decl.c */
/* Tell the binding oracle what kind of binding we are looking for. */
enum c_oracle_request
{
C_ORACLE_SYMBOL,
C_ORACLE_TAG,
C_ORACLE_LABEL
};
/* If this is non-NULL, then it is a "binding oracle" which can lazily
create bindings when needed by the C compiler. The oracle is told
the name and type of the binding to create. It can call pushdecl
or the like to ensure the binding is visible; or do nothing,
leaving the binding untouched. c-decl.c takes note of when the
oracle has been called and will not call it again if it fails to
create a given binding. */
typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier);
extern c_binding_oracle_function *c_binding_oracle;
extern void c_finish_incomplete_decl (tree);
extern tree c_omp_reduction_id (enum tree_code, tree);
extern tree c_omp_reduction_decl (tree);
extern tree c_omp_reduction_lookup (tree, tree);
extern tree c_check_omp_declare_reduction_r (tree *, int *, void *);
extern void c_pushtag (location_t, tree, tree);
extern void c_bind (location_t, tree, bool);
extern bool tag_exists_p (enum tree_code, tree);
/* In c-errors.c */
extern bool pedwarn_c90 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern bool pedwarn_c99 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern void
set_c_expr_source_range (c_expr *expr,
location_t start, location_t finish);
extern void
set_c_expr_source_range (c_expr *expr,
source_range src_range);
/* In c-fold.c */
extern tree decl_constant_value_for_optimization (tree);
extern vec<tree> incomplete_record_decls;
#endif /* ! GCC_C_TREE_H */
|
solucao_omp_critical.c | /******************************************************************************
* OpenMP Example - Matrix-vector multiplication - C/C++ Version
* FILE: omp_matvec.c
* DESCRIPTION:
* This example multiplies all row i elements of matrix A with vector
* element b(i) and stores the summed products in vector c(i). A total is
* maintained for the entire matrix. Performed by using the OpenMP loop
* work-sharing construct. The update of the shared global total is
* serialized by using the OpenMP critical directive.
* SOURCE: Blaise Barney 5/99
* LAST REVISED:
******************************************************************************/
#include <omp.h>
#include <stdio.h>

#define SIZE 10
/*
 * Matrix-vector multiplication demo: every row-i element of A is multiplied
 * by b[i] and accumulated into c[i]; a shared running total is updated
 * inside an OpenMP critical section.  Rows are distributed across threads
 * with an "omp for" work-sharing construct.
 *
 * Fixes: implicit-int "main()" (invalid since C99) is now "int main(void)"
 * with an explicit return value; printf() requires <stdio.h> (added to the
 * include block).
 */
int main (void)
{
float A[SIZE][SIZE], b[SIZE], c[SIZE], total;
int i, j, tid;

/* Initializations: A[i][j] = j+1, b[i] = i+1, c[] zeroed */
total = 0.0;
for (i = 0; i < SIZE; i++)
  {
  for (j = 0; j < SIZE; j++)
    A[i][j] = (j+1) * 1.0;
  b[i] = 1.0 * (i+1);
  c[i] = 0.0;
  }

printf("\nStarting values of matrix A and vector b:\n");
for (i = 0; i < SIZE; i++)
  {
  printf(" A[%d]= ",i);
  for (j = 0; j < SIZE; j++)
    printf("%.1f ",A[i][j]);
  printf(" b[%d]= %.1f\n",i,b[i]);
  }

printf("\nResults by thread/row:\n");

/* Create a team of threads; tid and i are private per thread */
#pragma omp parallel shared(A,b,c,total) private(tid,i)
  {
  tid = omp_get_thread_num();

  /* Loop work-sharing construct - distribute rows of matrix */
  #pragma omp for private(j)
  for (i = 0; i < SIZE; i++)
    {
    for (j = 0; j < SIZE; j++)
      c[i] += (A[i][j] * b[i]);

    /* Update and display of running total must be serialized */
    #pragma omp critical
      {
      total = total + c[i];
      printf(" thread %d did row %d\t c[%d]=%.2f\t",tid,i,i,c[i]);
      printf("Running total= %.2f\n",total);
      }
    } /* end of parallel i loop */
  } /* end of parallel construct */

printf("\nMatrix-vector total - sum of all c[] = %.2f\n\n",total);
return 0;
}
|
newtonpf.h | /*
* newtonpf.cuh
*
* Created on: 23/09/2015
* Author: Igor M. Araújo
*/
#ifndef NEWTONPF_CUH_
#define NEWTONPF_CUH_
#include <Eigen/SparseLU>
#include "util/quicksort.h"
#include "util/timer.h"
using namespace std;
using namespace Eigen;
// Computes the power-flow mismatch vector F on the host and returns the
// convergence error: the largest absolute mismatch component encountered.
// F layout: [0, H_NPV) real mismatch at PV buses; [H_NPV, H_NPV+H_NPQ)
// real mismatch at PQ buses; [H_NPV+H_NPQ, H_NPV+2*H_NPQ) imaginary
// mismatch at PQ buses.
// Fix: the three per-update "omp critical" sections are replaced by an
// OpenMP 3.1 max-reduction on err -- identical result, without serializing
// every update behind one lock.
__host__ double mkl_checkConvergence(
	Bus* buses,
	unsigned int* pv,
	unsigned int* pq,
	int nnzYbus,
	int* csrRowPtrYbus,
	int* csrColIndYbus,
	cuDoubleComplex* csrValYbus,
	cuDoubleComplex *V,
	double *F)
{
	double err = 0.0;
	#pragma omp parallel for reduction(max:err)
	for (int id = 0; id < H_NPV + H_NPQ; id++)
	{
		// Map the work item to a bus: PV buses first, then PQ buses.
		int i, indice;
		if (id < H_NPV)
		{
			i = id;
			indice = pv[i];
		}
		else
		{
			i = id - H_NPV;
			indice = pq[i];
		}
		// c = (row `indice` of Ybus) . V  -- sparse dot product.
		cuDoubleComplex c = make_cuDoubleComplex(0, 0);
		for (int k = csrRowPtrYbus[indice] - BASE_INDEX, endFor = csrRowPtrYbus[indice + 1] - BASE_INDEX; k < endFor; k++) {
			int j = csrColIndYbus[k] - BASE_INDEX;
			c = cuCadd(c, cuCmul(csrValYbus[k], V[j]));
		}
		// miss = V[indice] * conj(c) - (P + jQ) of the bus.
		Bus l_bus = buses[indice];
		cuDoubleComplex pot = make_cuDoubleComplex(l_bus.P, l_bus.Q);
		cuDoubleComplex miss = cuCmul(V[indice], cuConj(c));
		miss = cuCsub(miss, pot);
		if (l_bus.type == l_bus.PV) {
			F[i] = cuCreal(miss);
			err = max(err, abs(cuCreal(miss)));
		}
		if (l_bus.type == l_bus.PQ) {
			F[H_NPV+ i] = cuCreal(miss);
			err = max(err, abs(cuCreal(miss)));
			F[H_NPV + H_NPQ + i] = cuCimag(miss);
			err = max(err, abs(cuCimag(miss)));
		}
	}
	return err;
}
// Host computation of diagIbus: for every bus, the complex product of the
// corresponding Ybus row with the voltage vector V, accumulated in explicit
// real/imaginary form.  One OpenMP task per bus.
__host__ void mkl_computeDiagIbus(
	int nnzYbus,
	int* csrRowPtrYbus,
	int* csrColIndYbus,
	cuDoubleComplex* csrValYbus,
	cuDoubleComplex* V,
	cuDoubleComplex* diagIbus)
{
	#pragma omp parallel for
	for (int row = 0; row < H_NBUS; row++)
	{
		double sumRe = 0.0;
		double sumIm = 0.0;
		int first = csrRowPtrYbus[row] - BASE_INDEX;
		int last = csrRowPtrYbus[row + 1] - BASE_INDEX;
		for (int k = first; k < last; k++)
		{
			cuDoubleComplex y = csrValYbus[k];
			cuDoubleComplex v = V[csrColIndYbus[k] - BASE_INDEX];
			// Complex multiply-accumulate, written out component-wise.
			sumRe += cuCreal(y) * cuCreal(v) - cuCimag(y) * cuCimag(v);
			sumIm += cuCreal(y) * cuCimag(v) + cuCimag(y) * cuCreal(v);
		}
		diagIbus[row] = make_cuDoubleComplex(sumRe, sumIm);
	}
}
// Fills the numeric values of the Jacobian J (one OpenMP task per stored
// nonzero, in the order given by d_cooRowJ/csrColIndJ).  Rows/columns below
// H_NPV+H_NPQ index PV-then-PQ angle-type unknowns, the remainder index PQ
// magnitude-type unknowns; (ii, jj) are the bus indices behind (i, j).
// NOTE(review): cuCreal(V[..])/cuCimag(V[..]) are named magnitude/angle
// although V is written in rectangular form by mkl_updateVoltage -- confirm
// the intended coordinate convention before modifying the formulas.
__host__ void mkl_compuateJacobianMatrix(
int nnzJ,
int* d_cooRowJ,
int* csrRowPtrJ,
int* csrColIndJ,
double* csrValJ,
unsigned int* device_pq,
unsigned int* device_pv,
int nnzYbus,
int* csrRowPtrYbus,
int* csrColIndYbus,
cuDoubleComplex* csrValYbus,
cuDoubleComplex* diagIbus,
cuDoubleComplex* V)
{
#pragma omp parallel for
for (int id = 0; id < nnzJ; id++)
{
int length = (H_NPV + H_NPQ);
int i = d_cooRowJ[id];
int j = csrColIndJ[id];
int ii, jj;
// Map the Jacobian row/column back to the bus it refers to.
if (i < length) {
ii = (i < H_NPV) ? device_pv[i] : device_pq[i - H_NPV];
} else {
ii = device_pq[i - H_NPV - H_NPQ];
}
if (j < length) {
jj = (j < H_NPV) ? device_pv[j] : device_pq[j - H_NPV];
} else {
jj = device_pq[j - H_NPV - H_NPQ];
}
// Look up Ybus(ii, jj); remains zero when the entry is structurally absent.
cuDoubleComplex admittance = make_cuDoubleComplex(0,0);
for(int k = csrRowPtrYbus[ii] - BASE_INDEX, endFor = csrRowPtrYbus[ii + 1] - BASE_INDEX; k < endFor; k++)
{
if(jj == csrColIndYbus[k] - BASE_INDEX){
admittance = csrValYbus[k];
break;
}
}
double admittanceReal = cuCreal(admittance);
double admittanceImag = cuCimag(admittance);
double magnitude_j = cuCreal(V[jj]);
double angle_j = cuCimag(V[jj]);
// diagIbus contributes only on the diagonal (ii == jj).
double IbusReal = ((ii == jj) ? cuCreal(diagIbus[ii]) : 0.0);
double IbusImag = ((ii == jj) ? cuCimag(diagIbus[ii]) : 0.0);
double magnitude_i = cuCreal(V[ii]);
double angle_i = cuCimag(V[ii]);
// Four cases selected by whether the row and column index an angle-type
// (< length) or magnitude-type unknown.
if (i < length)
{
if (j < length) {
double real = admittanceReal * magnitude_j - admittanceImag * angle_j;
double imag = admittanceReal * angle_j + admittanceImag * magnitude_j;
csrValJ[id] = -angle_i * (IbusReal - real) - magnitude_i * (-IbusImag + imag);
}
else // if (j < length)
{
// `abs` shadows ::abs inside this scope; it is |V[jj]| in rectangular form.
double abs = sqrt(magnitude_j * magnitude_j + angle_j * angle_j);
double real = admittanceReal * magnitude_j / abs - admittanceImag * angle_j / abs;
double imag = admittanceReal * angle_j / abs + admittanceImag * magnitude_j / abs;
csrValJ[id] = magnitude_i * real - angle_i * -imag + IbusReal * magnitude_j / abs + IbusImag * angle_j / abs;
}
}
else // if (i < length)
{
if (j < length)
{
double real = admittanceReal * magnitude_j - admittanceImag * angle_j;
double imag = admittanceReal * angle_j + admittanceImag * magnitude_j;
csrValJ[id] = -angle_i * (-IbusImag + imag) + magnitude_i * (IbusReal - real);
}
else //if (j < length)
{
double abs = sqrt(magnitude_j * magnitude_j + angle_j * angle_j);
double real = admittanceReal * magnitude_j / abs - admittanceImag * angle_j / abs;
double imag = admittanceReal * angle_j / abs + admittanceImag * magnitude_j / abs;
csrValJ[id] = magnitude_i * -imag + angle_i * real + IbusReal * angle_j / abs + -IbusImag * magnitude_j / abs;
}
}
}
}
// Applies the Newton step dx to the voltage vector V on the host.
// dx layout implied by the indexing below: entries [0, H_NPV+H_NPQ) are
// angle corrections (PV buses first, then PQ buses); entries
// [H_NPV+H_NPQ, H_NPV+2*H_NPQ) are magnitude corrections for PQ buses, so
// the magnitude correction for the PQ bus at position id is
// dx[H_NPQ + id] (= H_NPV + H_NPQ + (id - H_NPV)).
__host__ void mkl_updateVoltage(
unsigned int *pv,
unsigned int *pq,
cuDoubleComplex *V,
double *dx)
{
#pragma omp parallel for
for (int id = 0; id < H_NPV + H_NPQ; id++)
{
int i;
if (id < H_NPV)
{
// PV bus: magnitude held fixed, only the angle is corrected.
i = pv[id];
cuDoubleComplex voltage = V[i];
V[i] = cuCmul(make_cuDoubleComplex(cuCabs(voltage), 0), cuCexp(make_cuDoubleComplex(0, cuCangle(voltage) - dx[id])));
}
else
{
// PQ bus: both magnitude and angle are corrected.
i = pq[id - H_NPV];
cuDoubleComplex voltage = V[i];
V[i] = cuCmul(make_cuDoubleComplex(cuCabs(voltage) - dx[H_NPQ + id], 0),cuCexp(make_cuDoubleComplex(0,cuCangle(voltage) - dx[id])));
}
}
}
// Builds the sparsity pattern of the Jacobian J from the Ybus pattern.
// Pass 1 counts nonzeros (1 for PV-PV, 2 for PV-PQ and PQ-PV, 4 for PQ-PQ,
// matching the block structure used by mkl_compuateJacobianMatrix); pass 2
// fills COO row/column indices; MKL's mkl_dcsrcoo then converts COO -> CSR.
// Outputs go to the globals cooRowJ, csrValJ, csrColIndJ, csrRowPtrJ, nnzJ.
// Fix: the MKL `job` control array was only partially initialized (job[3]
// was left indeterminate before being handed to the library); it is now
// fully initialized, and the `(const int*)&job` cast is gone.
__host__ void mkl_computeNnzJacobianMatrix()
{
	// #1 Predict nonzero numbers of Matrix J
	for(int i = 0; i < H_NBUS; i++)
	{
		for(int k = csrRowPtrYbus[i] - BASE_INDEX; k < csrRowPtrYbus[i + 1] - BASE_INDEX; k++)
		{
			int j = csrColIndYbus[k] - BASE_INDEX;
			if(buses[i].type == Bus::PV && buses[j].type == Bus::PV)
			{
				nnzJ++;
			}
			if(buses[i].type == Bus::PV && buses[j].type == Bus::PQ)
			{
				nnzJ += 2;
			}
			if(buses[i].type == Bus::PQ && buses[j].type == Bus::PV)
			{
				nnzJ += 2;
			}
			if(buses[i].type == Bus::PQ && buses[j].type == Bus::PQ)
			{
				nnzJ += 4;
			}
		}
	}
	// #2 Compute indexes of Matrix J with nonzero numbers.
	// indicePVPQ is the bus's position among the PV+PQ unknowns; the +H_NPQ
	// offset addresses the magnitude-type row/column of a PQ bus.
	int *cooColJ;
	cooRowJ = (int*) malloc(sizeof(int) * nnzJ);
	cooColJ = (int*) malloc(sizeof(int) * nnzJ);
	csrValJ = (double*) MKL_malloc(sizeof(double) * nnzJ, 64);
	csrColIndJ = (int*) MKL_malloc(sizeof(int) * nnzJ, 64);
	int ptr = 0;
	for(int i = 0; i < H_NBUS; i++)
	{
		for(int k = csrRowPtrYbus[i] - BASE_INDEX; k < csrRowPtrYbus[i + 1] - BASE_INDEX; k++)
		{
			int j = csrColIndYbus[k] - BASE_INDEX;
			if(buses[i].type == Bus::PV && buses[j].type == Bus::PV)
			{
				cooRowJ[ptr] = buses[i].indicePVPQ;
				cooColJ[ptr] = buses[j].indicePVPQ;
				ptr++;
			}
			if(buses[i].type == Bus::PV && buses[j].type == Bus::PQ)
			{
				cooRowJ[ptr] = buses[i].indicePVPQ;
				cooColJ[ptr] = buses[j].indicePVPQ;
				ptr++;
				cooRowJ[ptr] = buses[i].indicePVPQ;
				cooColJ[ptr] = buses[j].indicePVPQ + H_NPQ;
				ptr++;
			}
			if(buses[i].type == Bus::PQ && buses[j].type == Bus::PV)
			{
				cooRowJ[ptr] = buses[i].indicePVPQ;
				cooColJ[ptr] = buses[j].indicePVPQ;
				ptr++;
				cooRowJ[ptr] = buses[i].indicePVPQ + H_NPQ;
				cooColJ[ptr] = buses[j].indicePVPQ;
				ptr++;
			}
			if(buses[i].type == Bus::PQ && buses[j].type == Bus::PQ)
			{
				cooRowJ[ptr] = buses[i].indicePVPQ;
				cooColJ[ptr] = buses[j].indicePVPQ;
				ptr++;
				cooRowJ[ptr] = buses[i].indicePVPQ + H_NPQ;
				cooColJ[ptr] = buses[j].indicePVPQ;
				ptr++;
				cooRowJ[ptr] = buses[i].indicePVPQ;
				cooColJ[ptr] = buses[j].indicePVPQ + H_NPQ;
				ptr++;
				cooRowJ[ptr] = buses[i].indicePVPQ + H_NPQ;
				cooColJ[ptr] = buses[j].indicePVPQ + H_NPQ;
				ptr++;
			}
		}
	}
	// #3 Sort Matrix J by ROW: COO -> CSR conversion via MKL.
	// job[0]=2: convert COO to CSR (columns sorted within each row);
	// job[1]=0 / job[2]=0: zero-based CSR and COO indexing;
	// job[4]=nnzJ: nonzero capacity; job[3], job[5] as in the original
	// configuration, now explicitly zeroed instead of indeterminate.
	int info;
	int length = H_NPV + 2 * H_NPQ;
	int job[6] = { 2, 0, 0, 0, nnzJ, 0 };
	MKL_DCSRCOO(job, &length, csrValJ, csrColIndJ, csrRowPtrJ, &nnzJ, csrValJ, cooRowJ, cooColJ, &info);
	if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
	// Keep cooRowJ sorted to mirror the CSR row order.
	quickSort(cooRowJ, 0, nnzJ - 1);
	// #5 Clear Memory
	free(cooColJ);
}
// Solves J * dx = F on the host with the MKL Direct Sparse Solver (DSS):
// create handle -> define structure -> reorder -> factor -> solve -> delete.
// Every DSS call is checked and aborts the process on failure.
__host__ void mkl_solver_MKL_DSS()
{
int length = H_NPV + 2 * H_NPQ;
_MKL_DSS_HANDLE_t handle;
MKL_INT opt;
opt = MKL_DSS_MSG_LVL_WARNING;
// opt += MKL_DSS_TERM_LVL_ERROR;
opt += MKL_DSS_ZERO_BASED_INDEXING;
MKL_INT result;
result = DSS_CREATE(handle, opt); if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
MKL_INT opt_define = MKL_DSS_NON_SYMMETRIC;
result = DSS_DEFINE_STRUCTURE(handle, opt_define, csrRowPtrJ, length, length, csrColIndJ, nnzJ);if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
// Identity permutation; MKL_DSS_AUTO_ORDER lets DSS choose its own ordering.
int *perm = (int*) MKL_malloc(sizeof(int) * length, 64);
for(int i = 0; i < length; i++){
perm[i] = i;
}
MKL_INT opt_REORDER = MKL_DSS_AUTO_ORDER;
result = DSS_REORDER(handle, opt_REORDER,perm);if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
// MKL_INT opt_REORDER2 = MKL_DSS_GET_ORDER;
// result = DSS_REORDER(handle, opt_REORDER2,perm);if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
// NOTE(review): the structure is declared MKL_DSS_NON_SYMMETRIC but the
// factorization requests MKL_DSS_POSITIVE_DEFINITE; MKL documents
// MKL_DSS_INDEFINITE for general non-symmetric systems -- verify.
MKL_INT opt_FACTOR = MKL_DSS_POSITIVE_DEFINITE;
result = DSS_FACTOR_REAL(handle, opt_FACTOR, csrValJ);if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
MKL_INT opt_DEFAULT = MKL_DSS_DEFAULTS;
MKL_INT nrhs = 1;
result = DSS_SOLVE_REAL(handle, opt_DEFAULT, F, nrhs, dx);if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
result = DSS_DELETE(handle, opt_DEFAULT);if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
MKL_free(perm);
}
// Solves J * dx = F on the host with Eigen's SparseLU (COLAMD ordering).
// The CSR arrays csrRowPtrJ/csrColIndJ/csrValJ are copied entry-by-entry
// into an Eigen sparse matrix, F into a dense right-hand side.
__host__ void eigen_sparseLU_solver(){
	int n = H_NPV + 2 * H_NPQ;
	SparseMatrix<double> jac(n, n);
	for(int row = 0; row < n; row++){
		for(int k = csrRowPtrJ[row]; k < csrRowPtrJ[row + 1]; k++){
			jac.insert(row, csrColIndJ[k]) = csrValJ[k];
		}
	}
	jac.makeCompressed();
	SparseLU<SparseMatrix<double>, COLAMDOrdering<int> > lu;
	lu.compute(jac);
	VectorXd rhs(n);
	for(int row = 0; row < n; row++){
		rhs(row) = F[row];
	}
	VectorXd sol = lu.solve(rhs);
	for(int row = 0; row < n; row++){
		dx[row] = sol(row);
	}
}
// Host (MKL) Newton-Raphson power-flow driver.  Loop: mismatch check ->
// diagIbus -> (lazy, first iteration only) Jacobian pattern -> Jacobian
// values -> linear solve (MKL DSS or Eigen SparseLU) -> voltage update,
// until err < EPS or MAX_IT_NR iterations.  Phase timings are accumulated
// into the global timeTable.  Returns true iff the iteration converged.
__host__ bool mkl_newtonpf()
{
double start;
start =GetTimer();
double err = mkl_checkConvergence(
buses,
pv,
pq,
nnzYbus,
csrRowPtrYbus,
csrColIndYbus,
csrValYbus,
V,
F);
timeTable[TIME_CHECKCONVERGENCE] += GetTimer() - start;
#ifdef DEBUG
int length = H_NPV + 2 * H_NPQ;
printf("F = \n");
for(int i = 0; i < length; i++){
double value = F[i];
printf("\t(%d)\t->\t%.4e\n", i+1, value);
}
#endif
int iter = 0;
bool converged = false;
if (err < EPS) {
converged = true;
}
while (!converged && iter < MAX_IT_NR) {
iter++;
start =GetTimer();
mkl_computeDiagIbus(
nnzYbus,
csrRowPtrYbus,
csrColIndYbus,
csrValYbus,
V,
diagIbus);
timeTable[TIME_COMPUTEDIAGIBUS] += GetTimer() - start;
#ifdef DEBUG
printf("diagIbus = \n");
for(int i = 0; i < H_NBUS; i++){
cuDoubleComplex value = diagIbus[i];
printf("%.4e %c %.4ei\n", value.x,((value.y < 0.0) ? '-' : '+'),((value.y < 0.0) ? -value.y : value.y));
}
#endif
// The Jacobian sparsity pattern is built once (nnzJ == 0 on first entry)
// and re-used on every subsequent iteration.
if(nnzJ == 0)
{
start =GetTimer();
mkl_computeNnzJacobianMatrix();
timeTable[TIME_COMPUTENNZJACOBIANMATRIX] += GetTimer() - start;
}
start =GetTimer();
mkl_compuateJacobianMatrix(
nnzJ,
cooRowJ,
csrRowPtrJ,
csrColIndJ,
csrValJ,
pq,
pv,
nnzYbus,
csrRowPtrYbus,
csrColIndYbus,
csrValYbus,
diagIbus,
V);
timeTable[TIME_COMPUTEJACOBIANMATRIX] += GetTimer() - start;
#ifdef DEBUG
printf("J = \n");
printf("\tCompressed Sparse Column(rows = %d, cols = %d, nnz = %d [%.2lf])\n",length, length,nnzJ, nnzJ * 100.0f / (length * length));
for(int j = 0; j < length; j++){
for(int i = 0; i < length; i++){
for(int k = csrRowPtrJ[i]; k < csrRowPtrJ[i + 1]; k++){
if(j == csrColIndJ[k]){
double value = csrValJ[k];
printf("\t(%d, %d)\t->\t%.4e\n", i+1, j+1, value);
break;
}
}
}
}
#endif
// compute update step ------------------------------------------------
switch(H_LinearSolver){
case MKL_DSS:
start =GetTimer();
mkl_solver_MKL_DSS();
timeTable[TIME_SOLVER_MKL_DSS] += GetTimer() - start;
break;
case Eigen_SparseLU:
start =GetTimer();
eigen_sparseLU_solver();
// NOTE(review): Eigen solve time is accumulated into TIME_SOLVER_MKL_DSS;
// possibly intended to use a dedicated Eigen timer slot -- confirm.
timeTable[TIME_SOLVER_MKL_DSS] += GetTimer() - start;
break;
}
#ifdef DEBUG
printf("dx = \n");
for(int i = 0; i < length; i++){
double value = dx[i];
printf("\t(%d)\t->\t%.4e\n", i+1, -value);
}
#endif
start =GetTimer();
mkl_updateVoltage(
pv,
pq,
V,
dx);
timeTable[TIME_UPDATEVOLTAGE] += GetTimer() - start;
#ifdef DEBUG
printf("V = \n");
for(int i = 0; i < H_NBUS; i++)
{
printf("%.4e %c %.4ei\n", V[i].x, ((V[i].y < 0) ? '-' : '+'), ((V[i].y < 0) ? -V[i].y : V[i].y));
}
#endif
start =GetTimer();
err = mkl_checkConvergence(
buses,
pv,
pq,
nnzYbus,
csrRowPtrYbus,
csrColIndYbus,
csrValYbus,
V,
F);
timeTable[TIME_CHECKCONVERGENCE] += GetTimer() - start;
#ifdef DEBUG
printf("F = \n");
for(int i = 0; i < length; i++){
double value = F[i];
printf("\t(%d)\t->\t%.4e\n", i+1, value);
}
#endif
if (err < EPS) {
converged = true;
}
}
return converged;
}
// Device analogue of mkl_checkConvergence: one CUDA thread per PV/PQ bus
// fills the mismatch vector F.  Unlike the host version it does NOT reduce
// a convergence error -- the caller must do that separately.  Ybus arrays
// are indexed zero-based here (no BASE_INDEX correction).
// `nTest` is unused in this kernel body.
__global__ void hybrid_checkConvergence(
int nTest,
Bus* buses,
unsigned int* pv,
unsigned int* pq,
int nnzYbus,
int* csrRowPtrYbus,
int* csrColIndYbus,
cuDoubleComplex* csrValYbus,
cuDoubleComplex *V,
double *F) {
int id = ID();
if (id < D_NPV + D_NPQ) {
// Map the thread to a bus: PV buses first, then PQ buses.
int i, indice;
if (id < D_NPV) {
i = id;
indice = pv[i];
} else {
i = id - D_NPV;
indice = pq[i];
}
// c = (row `indice` of Ybus) . V  -- sparse dot product.
cuDoubleComplex c = make_cuDoubleComplex(0, 0);
for (int k = csrRowPtrYbus[indice], endFor = csrRowPtrYbus[indice + 1]; k < endFor; k++) {
int j = csrColIndYbus[k];
c = cuCadd(c, cuCmul(csrValYbus[k], V[j]));
}
// miss = V[indice] * conj(c) - (P + jQ) of the bus.
Bus l_bus = buses[indice];
cuDoubleComplex pot = make_cuDoubleComplex(l_bus.P, l_bus.Q);
cuDoubleComplex miss = cuCmul(V[indice], cuConj(c));
miss = cuCsub(miss, pot);
if (l_bus.type == l_bus.PV) {
F[i] = cuCreal(miss);
}
if (l_bus.type == l_bus.PQ) {
F[D_NPV + i ] = cuCreal(miss);
F[D_NPV + D_NPQ + i] = cuCimag(miss);
}
}
}
// Device computation of diagIbus: one CUDA thread per bus accumulates the
// complex product of its Ybus row with the voltage vector V, component-wise.
// `test` is unused in this kernel body.
__global__ void hybrid_computeDiagIbus(
	int test,
	int nnzYbus,
	int* csrRowPtrYbus,
	int* csrColIndYbus,
	cuDoubleComplex* csrValYbus,
	cuDoubleComplex* V,
	cuDoubleComplex* diagIbus)
{
	int row = ID();
	if (row < D_NBUS) {
		double sumRe = 0.0;
		double sumIm = 0.0;
		for (int k = csrRowPtrYbus[row], last = csrRowPtrYbus[row + 1]; k < last; k++) {
			cuDoubleComplex y = csrValYbus[k];
			cuDoubleComplex v = V[csrColIndYbus[k]];
			// Complex multiply-accumulate, written out component-wise.
			sumRe += cuCreal(y) * cuCreal(v) - cuCimag(y) * cuCimag(v);
			sumIm += cuCreal(y) * cuCimag(v) + cuCimag(y) * cuCreal(v);
		}
		diagIbus[row] = make_cuDoubleComplex(sumRe, sumIm);
	}
}
// Device analogue of mkl_compuateJacobianMatrix: one CUDA thread per stored
// Jacobian nonzero.  Rows/columns below D_NPV+D_NPQ index PV-then-PQ
// angle-type unknowns, the remainder index PQ magnitude-type unknowns;
// (ii, jj) are the bus indices behind (i, j).  Ybus here is zero-based.
// Fix: `admittance` was declared uninitialized, so a structurally absent
// Ybus(ii, jj) entry made the thread read an indeterminate value (the host
// twin already zero-initialized it); it now starts at 0+0i.
// `test` is unused in this kernel body.
__global__ void hybrid_compuateJacobianMatrix(
	int test,
	int nnzJ,
	int* d_cooRowJ,
	int* csrRowPtrJ,
	int* csrColIndJ,
	double* csrValJ,
	unsigned int* device_pq,
	unsigned int* device_pv,
	int nnzYbus,
	int* csrRowPtrYbus,
	int* csrColIndYbus,
	cuDoubleComplex* csrValYbus,
	cuDoubleComplex* diagIbus,
	cuDoubleComplex* V)
{
	int id = threadIdx.x + blockIdx.x * blockDim.x;
	if (id < nnzJ)
	{
		int length = (D_NPV + D_NPQ);
		int i = d_cooRowJ[id];
		int j = csrColIndJ[id];
		int ii, jj;
		// Map the Jacobian row/column back to the bus it refers to.
		if (i < length) {
			ii = (i < D_NPV) ? device_pv[i] : device_pq[i - D_NPV];
		} else {
			ii = device_pq[i - D_NPV - D_NPQ];
		}
		if (j < length) {
			jj = (j < D_NPV) ? device_pv[j] : device_pq[j - D_NPV];
		} else {
			jj = device_pq[j - D_NPV - D_NPQ];
		}
		// Look up Ybus(ii, jj); remains zero when the entry is absent.
		cuDoubleComplex admittance = make_cuDoubleComplex(0, 0);
		for(int k = csrRowPtrYbus[ii], endFor = csrRowPtrYbus[ii + 1]; k < endFor; k++)
		{
			if(jj == csrColIndYbus[k]){
				admittance = csrValYbus[k];
				break;
			}
		}
		double admittanceReal = cuCreal(admittance);
		double admittanceImag = cuCimag(admittance);
		double magnitude_j = cuCreal(V[jj]);
		double angle_j = cuCimag(V[jj]);
		// diagIbus contributes only on the diagonal (ii == jj).
		double IbusReal = ((ii == jj) ? cuCreal(diagIbus[ii]) : 0.0);
		double IbusImag = ((ii == jj) ? cuCimag(diagIbus[ii]) : 0.0);
		double magnitude_i = cuCreal(V[ii]);
		double angle_i = cuCimag(V[ii]);
		// Four cases selected by whether the row and column index an
		// angle-type (< length) or magnitude-type unknown.
		if (i < length)
		{
			if (j < length) {
				double real = admittanceReal * magnitude_j - admittanceImag * angle_j;
				double imag = admittanceReal * angle_j + admittanceImag * magnitude_j;
				csrValJ[id] = -angle_i * (IbusReal - real) - magnitude_i * (-IbusImag + imag);
			}
			else // if (j < length)
			{
				double abs = sqrt(magnitude_j * magnitude_j + angle_j * angle_j);
				double real = admittanceReal * magnitude_j / abs - admittanceImag * angle_j / abs;
				double imag = admittanceReal * angle_j / abs + admittanceImag * magnitude_j / abs;
				csrValJ[id] = magnitude_i * real - angle_i * -imag + IbusReal * magnitude_j / abs + IbusImag * angle_j / abs;
			}
		}
		else // if (i < length)
		{
			if (j < length)
			{
				double real = admittanceReal * magnitude_j - admittanceImag * angle_j;
				double imag = admittanceReal * angle_j + admittanceImag * magnitude_j;
				csrValJ[id] = -angle_i * (-IbusImag + imag) + magnitude_i * (IbusReal - real);
			}
			else //if (j < length)
			{
				double abs = sqrt(magnitude_j * magnitude_j + angle_j * angle_j);
				double real = admittanceReal * magnitude_j / abs - admittanceImag * angle_j / abs;
				double imag = admittanceReal * angle_j / abs + admittanceImag * magnitude_j / abs;
				csrValJ[id] = magnitude_i * -imag + angle_i * real + IbusReal * angle_j / abs + -IbusImag * magnitude_j / abs;
			}
		}
	}
}
// Device analogue of mkl_updateVoltage: one CUDA thread per PV/PQ bus
// applies the Newton step dx to V.  Angle corrections live at dx[id]; the
// magnitude correction for the PQ bus at position id is dx[D_NPQ + id].
// `test` is unused in this kernel body.
__global__ void hybrid_updateVoltage(
	int test,
	unsigned int *pv,
	unsigned int *pq,
	cuDoubleComplex *V,
	double *dx)
{
	int id = ID();
	if (id < D_NPV + D_NPQ) {
		if (id < D_NPV) {
			// PV bus: magnitude held fixed, only the angle is corrected.
			int bus = pv[id];
			cuDoubleComplex v = V[bus];
			double mag = cuCabs(v);
			double ang = cuCangle(v) - dx[id];
			V[bus] = cuCmul(make_cuDoubleComplex(mag, 0), cuCexp(make_cuDoubleComplex(0, ang)));
		} else {
			// PQ bus: both magnitude and angle are corrected.
			int bus = pq[id - D_NPV];
			cuDoubleComplex v = V[bus];
			double mag = cuCabs(v) - dx[D_NPQ + id];
			double ang = cuCangle(v) - dx[id];
			V[bus] = cuCmul(make_cuDoubleComplex(mag, 0), cuCexp(make_cuDoubleComplex(0, ang)));
		}
	}
}
// GPU-path construction of the Jacobian sparsity pattern: copies the Ybus
// pattern to the host, counts/builds COO indices there (same 1/2/2/4
// block-count rule as the MKL version), uploads them, sorts by row with
// cusparse, converts COO -> CSR on the device, and mirrors the final CSR
// pattern back into h_csrRowPtrJ/h_csrColIndJ for the host solvers.
// Device buffers d_cooRowJ/csrColIndJ/csrValJ are allocated only once
// (guarded by d_cooRowJ == 0); csrValJ holds H_NTESTS value sets.
// NOTE(review): csrRowPtrJ is assumed to be already allocated elsewhere.
__host__ void hybrid_computeNnzJacobianMatrix()
{
// #1 Predict nonzero numbers of Matrix J
int *row, *col;
row = (int*) malloc(sizeof(int) * (H_NBUS + 1));
col = (int*) malloc(sizeof(int) * nnzYbus);
checkCudaErrors(cudaMemcpy(row, csrRowPtrYbus, sizeof(int) * (H_NBUS + 1), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(col, csrColIndYbus, sizeof(int) * nnzYbus, cudaMemcpyDeviceToHost));
for(int i = 0; i < H_NBUS; i++)
{
for(int k = row[i]; k < row[i + 1]; k++)
{
int j = col[k];
if(buses[i].type == Bus::PV && buses[j].type == Bus::PV)
{
nnzJ++;
}
if(buses[i].type == Bus::PV && buses[j].type == Bus::PQ)
{
nnzJ += 2;
}
if(buses[i].type == Bus::PQ && buses[j].type == Bus::PV)
{
nnzJ += 2;
}
if(buses[i].type == Bus::PQ && buses[j].type == Bus::PQ)
{
nnzJ += 4;
}
}
}
// #2 Compute indexes of Matrix J with nonzero numbers.
// indicePVPQ is the bus's position among the PV+PQ unknowns; the +H_NPQ
// offset addresses the magnitude-type row/column of a PQ bus.
int *cooRowJ, *cooColJ;
cooRowJ = (int*) malloc(sizeof(int) * nnzJ);
cooColJ = (int*) malloc(sizeof(int) * nnzJ);
int ptr = 0;
for(int i = 0; i < H_NBUS; i++)
{
for(int k = row[i]; k < row[i + 1]; k++)
{
int j = col[k];
if(buses[i].type == Bus::PV && buses[j].type == Bus::PV)
{
cooRowJ[ptr] = buses[i].indicePVPQ;
cooColJ[ptr] = buses[j].indicePVPQ;
ptr++;
}
if(buses[i].type == Bus::PV && buses[j].type == Bus::PQ)
{
cooRowJ[ptr] = buses[i].indicePVPQ;
cooColJ[ptr] = buses[j].indicePVPQ;
ptr++;
cooRowJ[ptr] = buses[i].indicePVPQ;
cooColJ[ptr] = buses[j].indicePVPQ + H_NPQ;
ptr++;
}
if(buses[i].type == Bus::PQ && buses[j].type == Bus::PV)
{
cooRowJ[ptr] = buses[i].indicePVPQ;
cooColJ[ptr] = buses[j].indicePVPQ;
ptr++;
cooRowJ[ptr] = buses[i].indicePVPQ + H_NPQ;
cooColJ[ptr] = buses[j].indicePVPQ;
ptr++;
}
if(buses[i].type == Bus::PQ && buses[j].type == Bus::PQ)
{
cooRowJ[ptr] = buses[i].indicePVPQ;
cooColJ[ptr] = buses[j].indicePVPQ;
ptr++;
cooRowJ[ptr] = buses[i].indicePVPQ + H_NPQ;
cooColJ[ptr] = buses[j].indicePVPQ;
ptr++;
cooRowJ[ptr] = buses[i].indicePVPQ;
cooColJ[ptr] = buses[j].indicePVPQ + H_NPQ;
ptr++;
cooRowJ[ptr] = buses[i].indicePVPQ + H_NPQ;
cooColJ[ptr] = buses[j].indicePVPQ + H_NPQ;
ptr++;
}
}
}
// #3 Sort Matrix J by ROW (cusparse in-place COO sort on the device)
int *d_cooColJ;
checkCudaErrors(cudaMalloc((void**) &d_cooColJ, sizeof(int) * nnzJ));
// One-time allocation of the persistent device buffers.
if(d_cooRowJ == 0)
{
checkCudaErrors(cudaMalloc((void**) &d_cooRowJ, sizeof(int) * nnzJ));
checkCudaErrors(cudaMalloc((void**) &csrColIndJ, sizeof(int) * nnzJ));
checkCudaErrors(cudaMalloc((void**) &csrValJ, sizeof(double) * nnzJ * H_NTESTS));
}
checkCudaErrors(cudaMemcpy(d_cooColJ, cooColJ, sizeof(int) * nnzJ, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_cooRowJ, cooRowJ, sizeof(int) * nnzJ, cudaMemcpyHostToDevice));
cusparseHandle_t handle;
cusparseCreate(&handle);
checkCudaErrors(cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST));
int length = H_NPV + 2 * H_NPQ;
size_t buffer = 0;
void *pBuff;
checkCudaErrors(cusparseXcoosort_bufferSizeExt(handle, length, length, nnzJ, d_cooRowJ, d_cooColJ, &buffer));
checkCudaErrors(cudaMalloc((void**) &pBuff , buffer * sizeof(char)));
int *permu;
checkCudaErrors(cudaMalloc((void**) &permu, nnzJ * sizeof(int)));
checkCudaErrors(cusparseCreateIdentityPermutation(handle, nnzJ, permu));
checkCudaErrors(cusparseXcoosortByRow(handle, length, length, nnzJ, d_cooRowJ, d_cooColJ, permu, pBuff));
// #4 Convert Matrix J in Coordinate Format(COO) to Compressed Sparse Row Format(CSR)
checkCudaErrors(cusparseXcoo2csr(handle, (const int*) d_cooRowJ, nnzJ, length, csrRowPtrJ, CUSPARSE_INDEX_BASE_ZERO));
checkCudaErrors(cudaMemcpy(csrColIndJ, d_cooColJ, nnzJ * sizeof(int), cudaMemcpyDeviceToDevice));
// Host mirror of the CSR pattern, used by the host-side solvers.
h_csrColIndJ = (int*) malloc(sizeof(int) * nnzJ);
h_csrRowPtrJ = (int*) malloc(sizeof(int) * (length + 1));
checkCudaErrors(cudaMemcpy(h_csrColIndJ, csrColIndJ, sizeof(int) * nnzJ, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_csrRowPtrJ, csrRowPtrJ, sizeof(int) * (length + 1), cudaMemcpyDeviceToHost));
// #5 Clear Memory
// NOTE(review): pBuff is not freed here -- possible device-memory leak.
free(row);
free(col);
free(cooRowJ);
free(cooColJ);
checkCudaErrors(cudaFree(permu));
checkCudaErrors(cudaFree(d_cooColJ));
checkCudaErrors(cusparseDestroy(handle));
}
// Runs nTest host-side LU factorizations of J with cusolverSp (the actual
// solve call is commented out -- this routine exercises analysis/factor
// only).  Fixes: X1 was leaked on every iteration and the cusparse matrix
// descriptor was never destroyed; both are now released.
__host__ void linearSolverSp(int nTest) {
	int length = H_NPV + 2 * H_NPQ;
	for (int i = 0; i < nTest; i++) {
		cusolverSpHandle_t spHandle;
		csrluInfoHost_t info;
		checkCudaErrors(cusolverSpCreateCsrluInfoHost(&info));
		checkCudaErrors(cusolverSpCreate(&spHandle));
		cusparseMatDescr_t matDescA = 0;
		cusparseCreateMatDescr(&matDescA);
		cusparseSetMatType(matDescA, CUSPARSE_MATRIX_TYPE_GENERAL);
		cusparseSetMatIndexBase(matDescA, CUSPARSE_INDEX_BASE_ZERO);
		// Symbolic analysis of J's CSR pattern.
		checkCudaErrors(cusolverSpXcsrluAnalysisHost(spHandle, length, nnzJ, matDescA, h_csrRowPtrJ, h_csrColIndJ, info));
		size_t size_internal;
		size_t size_lu;
		double *h_csrValJ;
		h_csrValJ = (double*) malloc(sizeof(double) * nnzJ);
		checkCudaErrors(cudaMemcpy(h_csrValJ, csrValJ, sizeof(double) * nnzJ, cudaMemcpyDeviceToHost));
		checkCudaErrors(cusolverSpDcsrluBufferInfoHost(spHandle, length, nnzJ, matDescA,h_csrValJ, h_csrRowPtrJ, h_csrColIndJ, info,&size_internal, &size_lu));
		char *buffer = (char*) malloc(size_lu * sizeof(char));
		int singularity = 0;
		const double tol = 1.e-14;
		const double pivot_threshold = 1.0;
		// Numeric factorization + zero-pivot check.
		checkCudaErrors(cusolverSpDcsrluFactorHost(spHandle, length, nnzJ, matDescA,h_csrValJ, h_csrRowPtrJ, h_csrColIndJ, info, pivot_threshold, buffer));
		checkCudaErrors(cusolverSpDcsrluZeroPivotHost(spHandle, info, tol,&singularity));
		double *X1 = (double*) malloc(length * sizeof(double));
		// checkCudaErrors(cusolverSpDcsrluSolveHost(spHandle, n, B[i], X1[i], info,buffer));
		checkCudaErrors(cudaDeviceSynchronize());
		free(X1);                              // leak fix: was never freed
		free(buffer);
		cusparseDestroyMatDescr(matDescA);     // leak fix: descriptor was never destroyed
		checkCudaErrors(cusolverSpDestroy(spHandle));
		checkCudaErrors(cusolverSpDestroyCsrluInfoHost(info));
		free(h_csrValJ);
	}
}
// Solves H_NTESTS linear systems J*x = F (batched in csrValJ / F): the
// first system is LU-factored and solved on the host with cusolverSp; the
// L/U factors and permutations are then handed to cusolverRf, which
// refactors with the remaining value sets and solves systems 1..H_NTESTS-1
// in place on the device (results overwrite F).
// Fixes: h_F, h_X and the cusparse matrix descriptor were leaked; they are
// now released in the cleanup section.
__host__ void solver_LS_with_RF()
{
	int length = H_NPV + 2 * H_NPQ;
	cusolverSpHandle_t spHandle;
	csrluInfoHost_t info;
	checkCudaErrors(cusolverSpCreateCsrluInfoHost(&info));
	checkCudaErrors(cusolverSpCreate(&spHandle));
	cusparseMatDescr_t matDescA = 0;
	cusparseCreateMatDescr(&matDescA);
	cusparseSetMatType(matDescA, CUSPARSE_MATRIX_TYPE_GENERAL);
	cusparseSetMatIndexBase(matDescA, CUSPARSE_INDEX_BASE_ZERO);
	// Symbolic analysis of J's CSR pattern on the host.
	checkCudaErrors(cusolverSpXcsrluAnalysisHost(spHandle, length, nnzJ, matDescA, h_csrRowPtrJ, h_csrColIndJ, info));
	size_t size_internal;
	size_t size_lu;
	double *h_csrValJ;
	h_csrValJ = (double*) malloc(sizeof(double) * nnzJ);
	checkCudaErrors(cudaMemcpy(h_csrValJ, csrValJ, sizeof(double) * nnzJ, cudaMemcpyDeviceToHost));
	checkCudaErrors(cusolverSpDcsrluBufferInfoHost(spHandle, length, nnzJ, matDescA, h_csrValJ, h_csrRowPtrJ, h_csrColIndJ, info, &size_internal, &size_lu));
	char *buffer = (char*) malloc(size_lu * sizeof(char));
	int singularity = 0;
	const double tol = 1.e-14;
	const double pivot_threshold = 1.0;
	// Numeric LU factorization on the host + zero-pivot check.
	checkCudaErrors(cusolverSpDcsrluFactorHost(spHandle, length, nnzJ, matDescA, h_csrValJ, h_csrRowPtrJ, h_csrColIndJ, info, pivot_threshold, buffer));
	checkCudaErrors(cusolverSpDcsrluZeroPivotHost(spHandle, info, tol, &singularity));
	// Solve the first system on the host and write the result back into F.
	double *h_F = (double*) malloc(length * sizeof(double));
	double *h_X = (double*) malloc(length * sizeof(double));
	checkCudaErrors(cudaMemcpy(h_F, F, length * sizeof(double), cudaMemcpyDeviceToHost));
	checkCudaErrors(cusolverSpDcsrluSolveHost(spHandle, length, h_F, h_X, info, buffer));
	checkCudaErrors(cudaMemcpy(F, h_X, length * sizeof(double), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaDeviceSynchronize());
	// Extract L, U and the permutations P, Q to seed cusolverRf.
	int nnzL;
	int nnzU;
	checkCudaErrors(cusolverSpXcsrluNnzHost(spHandle, &nnzL, &nnzU, info));
	int *h_P = (int*) malloc(sizeof(int) * length);
	int *h_Q = (int*) malloc(sizeof(int) * length);
	double *h_csrValL = (double*) malloc(sizeof(double) * nnzL);
	int *h_csrRowPtrL = (int*) malloc(sizeof(int) * (length + 1));
	int *h_csrColIndL = (int*) malloc(sizeof(int) * nnzL);
	double *h_csrValU = (double*) malloc(sizeof(double) * nnzU);
	int *h_csrRowPtrU = (int*) malloc(sizeof(int) * (length + 1));
	int *h_csrColIndU = (int*) malloc(sizeof(int) * nnzU);
	checkCudaErrors(cusolverSpDcsrluExtractHost(
		spHandle,
		h_P,
		h_Q,
		matDescA,
		h_csrValL,
		h_csrRowPtrL,
		h_csrColIndL,
		matDescA,
		h_csrValU,
		h_csrRowPtrU,
		h_csrColIndU,
		info,
		buffer));
	cusolverRfHandle_t rfHandle;
	checkCudaErrors(cusolverRfCreate(&rfHandle));
	checkCudaErrors(cusolverRfSetNumericProperties(rfHandle, 0.0, 0.0));
	checkCudaErrors(cusolverRfSetAlgs(rfHandle, CUSOLVERRF_FACTORIZATION_ALG0, CUSOLVERRF_TRIANGULAR_SOLVE_ALG1));
	checkCudaErrors(cusolverRfSetMatrixFormat(rfHandle, CUSOLVERRF_MATRIX_FORMAT_CSR, CUSOLVERRF_UNIT_DIAGONAL_ASSUMED_L));
	checkCudaErrors(cusolverRfSetResetValuesFastMode(rfHandle, CUSOLVERRF_RESET_VALUES_FAST_MODE_ON));
	int *d_P;
	int *d_Q;
	double *d_x;
	double *d_T;
	checkCudaErrors(cudaMalloc((void** ) &d_P, length * sizeof(int)));
	checkCudaErrors(cudaMalloc((void** ) &d_Q, length * sizeof(int)));
	checkCudaErrors(cudaMalloc((void** ) &d_x, length * sizeof(double)));
	checkCudaErrors(cudaMalloc((void** ) &d_T, length * sizeof(double)));
	checkCudaErrors(cusolverRfSetupHost(
		length,
		nnzJ,
		h_csrRowPtrJ,
		h_csrColIndJ,
		h_csrValJ,
		nnzL,
		h_csrRowPtrL,
		h_csrColIndL,
		h_csrValL,
		nnzU,
		h_csrRowPtrU,
		h_csrColIndU,
		h_csrValU,
		h_P,
		h_Q,
		rfHandle));
	checkCudaErrors(cudaDeviceSynchronize());
	checkCudaErrors(cusolverRfAnalyze(rfHandle));
	// Systems 1..H_NTESTS-1: refactor with the i-th value set and solve in
	// place on the i-th right-hand side slice of F.
	for (int i = 1; i < H_NTESTS; i++)
	{
		checkCudaErrors(cudaMemcpy(d_P, h_P, sizeof(int) * length, cudaMemcpyHostToDevice));
		checkCudaErrors(cudaMemcpy(d_Q, h_Q, sizeof(int) * length, cudaMemcpyHostToDevice));
		checkCudaErrors(cusolverRfResetValues(length, nnzJ, csrRowPtrJ, csrColIndJ, csrValJ + nnzJ * i, d_P, d_Q, rfHandle));
		checkCudaErrors(cudaDeviceSynchronize());
		checkCudaErrors(cusolverRfRefactor(rfHandle));
		checkCudaErrors(cusolverRfSolve(rfHandle, d_P, d_Q, 1, d_T, length, F + length * i, length));
	}
	checkCudaErrors(cudaDeviceSynchronize());
	checkCudaErrors(cusolverRfDestroy(rfHandle));
	checkCudaErrors(cudaFree(d_P));
	checkCudaErrors(cudaFree(d_Q));
	checkCudaErrors(cudaFree(d_T));
	checkCudaErrors(cudaFree(d_x));
	free(h_csrValJ);
	free(h_P);
	free(h_Q);
	free(h_csrValL);
	free(h_csrColIndL);
	free(h_csrRowPtrL);
	free(h_csrValU);
	free(h_csrRowPtrU);
	free(h_csrColIndU);
	free(h_F);                             // leak fix: was never freed
	free(h_X);                             // leak fix: was never freed
	free(buffer);
	cusparseDestroyMatDescr(matDescA);     // leak fix: descriptor was never destroyed
	checkCudaErrors(cusolverSpDestroy(spHandle));
	checkCudaErrors(cusolverSpDestroyCsrluInfoHost(info));
}
// Hybrid solver: copies the H_NTESTS batched Jacobian value sets and
// right-hand sides from the device, solves each system with MKL DSS on the
// host, and copies all solutions back into F on the device.
// NOTE(review): the staging buffers are `static` and sized from nnzJ,
// length and H_NTESTS at the FIRST call -- this assumes those quantities
// never change afterwards (and the memory is intentionally never freed);
// confirm.  The OpenMP loop is currently disabled (commented pragma).
__host__ void hybrid_solver_MKL_DSS()
{
static int length = H_NPV + 2 * H_NPQ;
static double *h_csrValJ = new double[nnzJ * H_NTESTS];
static double *h_F = new double[length * H_NTESTS];
static double *h_X = new double[length * H_NTESTS];
// Bulk device -> host transfer of all value sets and right-hand sides.
double start = GetTimer();
checkCudaErrors(cudaMemcpy(h_csrValJ, csrValJ, sizeof(double) * nnzJ * H_NTESTS, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_F, F, length * sizeof(double) * H_NTESTS, cudaMemcpyDeviceToHost));
timeTable[TIME_D2H_MEM_COPY] += GetTimer() - start;
//#pragma omp parallel for
for(int t = 0; t < H_NTESTS; t++)
{
/* double *h_csrValJ = (double*) malloc(sizeof(double) * nnzJ);
double *h_F = (double*) malloc(length * sizeof(double));
double *h_X = (double*) malloc(length * sizeof(double));
*/
// Per-system DSS pipeline: create -> structure -> reorder -> factor ->
// solve -> delete, on the t-th slice of the batched arrays.
_MKL_DSS_HANDLE_t handle;
MKL_INT opt;
opt = MKL_DSS_MSG_LVL_WARNING;
// opt += MKL_DSS_TERM_LVL_ERROR;
opt += MKL_DSS_ZERO_BASED_INDEXING;
MKL_INT result;
result = DSS_CREATE(handle, opt); if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
MKL_INT opt_define = MKL_DSS_NON_SYMMETRIC;
result = DSS_DEFINE_STRUCTURE(handle, opt_define, h_csrRowPtrJ, length, length, h_csrColIndJ, nnzJ);if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
int *perm = (int*) MKL_malloc(sizeof(int) * length, 64);
for(int i = 0; i < length; i++){
perm[i] = i;
}
MKL_INT opt_REORDER = MKL_DSS_AUTO_ORDER;
result = DSS_REORDER(handle, opt_REORDER,perm);if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
// MKL_INT opt_REORDER2 = MKL_DSS_GET_ORDER;
// result = DSS_REORDER(handle, opt_REORDER2,perm);if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
/* start = GetTimer();
checkCudaErrors(cudaMemcpy(h_csrValJ, csrValJ + t * nnzJ, sizeof(double) * nnzJ, cudaMemcpyDeviceToHost));
timeTable[TIME_D2H_MEM_COPY] += GetTimer() - start;
*/
// NOTE(review): MKL_DSS_POSITIVE_DEFINITE with a NON_SYMMETRIC structure;
// MKL documents MKL_DSS_INDEFINITE for general systems -- verify.
MKL_INT opt_FACTOR = MKL_DSS_POSITIVE_DEFINITE;
result = DSS_FACTOR_REAL(handle, opt_FACTOR, h_csrValJ + t * nnzJ);if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
MKL_INT opt_DEFAULT = MKL_DSS_DEFAULTS;
MKL_INT nrhs = 1;
/* start = GetTimer();
checkCudaErrors(cudaMemcpy(h_F, F + t * length, length * sizeof(double), cudaMemcpyDeviceToHost));
timeTable[TIME_D2H_MEM_COPY] += GetTimer() - start;
*/
result = DSS_SOLVE_REAL(handle, opt_DEFAULT, h_F + t * length, nrhs, h_X + t * length);if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
result = DSS_DELETE(handle, opt_DEFAULT);if(result != MKL_DSS_SUCCESS){printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
MKL_free(perm);
/* start = GetTimer();
checkCudaErrors(cudaMemcpy(F + t * length, h_X, length * sizeof(double), cudaMemcpyHostToDevice));
timeTable[TIME_H2D_MEM_COPY] += GetTimer() - start;
free(h_csrValJ);
free(h_F);
free(h_X);
*/
}
// Bulk host -> device transfer: all solutions overwrite F.
start = GetTimer();
checkCudaErrors(cudaMemcpy(F, h_X, length * sizeof(double) * H_NTESTS, cudaMemcpyHostToDevice));
timeTable[TIME_H2D_MEM_COPY] += GetTimer() - start;
/* free(h_csrValJ);
free(h_F);
free(h_X);
*/
}
// Solve J*x = F for every test case on the host with Eigen's SparseLU
// (COLAMD ordering). Each case is independent, hence the OpenMP loop.
// The solution overwrites F on the device.
__host__ void hybrid_eigen_sparseLU_solver(){
    int length = H_NPV + 2 * H_NPQ;  // system dimension per test case
    #pragma omp parallel for
    for(int t = 0; t < H_NTESTS; t++)
    {
        // Per-case host staging buffers (freed at the end of the iteration).
        double *h_csrValJ = (double*) malloc(sizeof(double) * nnzJ);
        double *h_F = (double*) malloc(length * sizeof(double));
        double *h_X = (double*) malloc(length * sizeof(double));
        checkCudaErrors(cudaMemcpy(h_csrValJ, csrValJ + t * nnzJ, sizeof(double) * nnzJ, cudaMemcpyDeviceToHost));
        checkCudaErrors(cudaMemcpy(h_F, F + t * length, length * sizeof(double), cudaMemcpyDeviceToHost));
        // Rebuild the sparse matrix from the shared CSR pattern
        // (h_csrRowPtrJ / h_csrColIndJ) and this case's values.
        SparseMatrix<double> A(length, length);
        for(int i = 0; i < length; i++){
            for(int k = h_csrRowPtrJ[i]; k < h_csrRowPtrJ[i+1]; k++){
                int j = h_csrColIndJ[k];
                A.insert(i, j) = h_csrValJ[k];
            }
        }
        A.makeCompressed();
        // NOTE(review): solverA.info() is never checked after compute()/solve();
        // a singular Jacobian would go unnoticed — consider verifying.
        SparseLU<SparseMatrix<double>, COLAMDOrdering<int> > solverA;
        solverA.compute(A);
        VectorXd B(length);
        for(int i = 0; i < length; i++){
            B(i) = h_F[i];
        }
        VectorXd X = solverA.solve(B);
        for(int i = 0; i < length; i++){
            h_X[i] = X(i);
        }
        // Push the Newton step back into F on the device.
        checkCudaErrors(cudaMemcpy(F + t * length, h_X, length * sizeof(double), cudaMemcpyHostToDevice));
        free(h_csrValJ);
        free(h_F);
        free(h_X);
    }
}
// Hybrid Newton-Raphson power-flow driver. GPU kernels build the power
// mismatch F and the Jacobian J for every test case; the linear systems
// J*dx = F are solved on the host (MKL DSS / Eigen SparseLU) or with
// cuSolver; voltages are updated on the GPU. Iterates until every test case
// converges (infinity norm of mismatch < EPS) or MAX_IT_NR is reached.
__host__ void hybrid_newtonpf()
{
    int length = H_NPV + 2 * H_NPQ;  // unknowns per case: angles (PV+PQ) + magnitudes (PQ)
    double err[H_NTESTS];            // per-case infinity norm of the mismatch
    double start;
    start = GetTimer();
    // Initial mismatch F, one CUDA stream per test case.
    for(int t = 0; t < H_NTESTS; t++)
    {
        hybrid_checkConvergence<<<BLOCKS((H_NPV + H_NPQ), H_THREADS), H_THREADS, 0, stream[t]>>>(
            t,
            device_buses,
            device_pv,
            device_pq,
            nnzYbus,
            csrRowPtrYbus,
            csrColIndYbus,
            csrValYbus + t * nnzYbus,
            V + t * H_NBUS,
            F + t * length);
    }
    checkCudaErrors(cudaDeviceSynchronize());
#ifdef DEBUG
    for (int t = 0; t < H_NTESTS; t++)
    {
        double *h_val = (double*) malloc(sizeof(double) * length);
        cudaMemcpy(h_val, F + length * t, sizeof(double) * length, cudaMemcpyDeviceToHost);
        printf("F[%d] = \n", t);
        for(int i = 0; i < length; i++){
            double value = h_val[i];
            printf("\t(%d)\t->\t%.4e\n", i+1, value);
        }
        free(h_val);
    }
#endif
    int iter = 0;
    bool converged = true;
    double* h_F = (double*) malloc(sizeof(double) * length * H_NTESTS);
    checkCudaErrors(cudaMemcpy(h_F, F, sizeof(double) * length * H_NTESTS, cudaMemcpyDeviceToHost));
    for(int t = 0; t < H_NTESTS; t++)
    {
        err[t] = 0.0;
        for(int i = 0; i < length; i++){
            // fabs instead of abs: abs is the integer overload in plain C and
            // would silently truncate the double mismatch.
            err[t] = max(err[t], fabs(h_F[i + length * t]));
        }
        if (err[t] < EPS) {
            converged_test[t] = true;
        } else {
            converged_test[t] = false;
        }
        converged &= converged_test[t];
    }
    timeTable[TIME_CHECKCONVERGENCE] += GetTimer() - start;
    while (!converged && iter < MAX_IT_NR) {
        iter++;
        start = GetTimer();
        for(int t = 0; t < H_NTESTS; t++)
        {
            // BUGFIX: the loop condition used to be
            // `t < H_NTESTS && !converged_test[t]`, which terminates at the
            // FIRST already-converged case and starves every case after it
            // (their kernels were never launched again). Skip instead.
            if (converged_test[t])
                continue;
            hybrid_computeDiagIbus<<<BLOCKS(H_NBUS, H_THREADS), H_THREADS, 0, stream[t]>>>
                (t,
                nnzYbus,
                csrRowPtrYbus,
                csrColIndYbus,
                csrValYbus + t * nnzYbus,
                V + t * H_NBUS,
                diagIbus + t * H_NBUS);
        }
        cudaDeviceSynchronize();
        timeTable[TIME_COMPUTEDIAGIBUS] += GetTimer() - start;
#ifdef DEBUG
        for (int t = 0; t < H_NTESTS; t++)
        {
            cuDoubleComplex *h_val = (cuDoubleComplex*) malloc(sizeof(cuDoubleComplex) * H_NBUS);
            cudaMemcpy(h_val, diagIbus + H_NBUS * t, sizeof(cuDoubleComplex) * H_NBUS, cudaMemcpyDeviceToHost);
            printf("diagIbus[%d] = \n", t);
            for(int i = 0; i < H_NBUS; i++){
                cuDoubleComplex value = h_val[i];
                printf("\t(%d)\t->\t%.4e %c %.4ei\n", i+1, value.x,((value.y < 0.0) ? '-' : '+'),((value.y < 0.0) ? -value.y : value.y));
            }
            free(h_val);
        }
#endif
        // The sparsity pattern of J is computed once and reused afterwards.
        if(nnzJ == 0)
        {
            start = GetTimer();
            hybrid_computeNnzJacobianMatrix();
            timeTable[TIME_COMPUTENNZJACOBIANMATRIX] += GetTimer() - start;
        }
        start = GetTimer();
        for(int t = 0; t < H_NTESTS; t++)
        {
            if (converged_test[t])  // same fix as above: skip, don't stop
                continue;
            hybrid_compuateJacobianMatrix<<<BLOCKS(nnzJ, H_THREADS), H_THREADS, 0, stream[t]>>>(
                t,
                nnzJ,
                d_cooRowJ,
                csrRowPtrJ,
                csrColIndJ,
                csrValJ + t * nnzJ,
                device_pq,
                device_pv,
                nnzYbus,
                csrRowPtrYbus,
                csrColIndYbus,
                csrValYbus + t * nnzYbus,
                diagIbus + t * H_NBUS,
                V + t * H_NBUS);
        }
        cudaDeviceSynchronize();
        timeTable[TIME_COMPUTEJACOBIANMATRIX] += GetTimer() - start;
#ifdef DEBUG
        for (int t = 0; t < H_NTESTS; t++)
        {
            int *h_row = (int*) malloc(sizeof(int) * (length + 1));
            int *h_col = (int*) malloc(sizeof(int) * nnzJ);
            double *h_val = (double*) malloc(sizeof(double) * nnzJ);
            cudaMemcpy(h_row, csrRowPtrJ, sizeof(int) * (length + 1), cudaMemcpyDeviceToHost);
            cudaMemcpy(h_col, csrColIndJ, sizeof(int) * nnzJ, cudaMemcpyDeviceToHost);
            cudaMemcpy(h_val, csrValJ + nnzJ * t, sizeof(double) * nnzJ, cudaMemcpyDeviceToHost);
            printf("J[%d] = \n", t);
            printf("\tCompressed Sparse Column(rows = %d, cols = %d, nnz = %d [%.2lf])\n",
                length, length,
                nnzJ, nnzJ * 100.0f / (length * length));
            for(int j = 0; j < length; j++){
                for(int i = 0; i < length; i++){
                    for(int k = h_row[i]; k < h_row[i + 1]; k++){
                        if(j == h_col[k]){
                            double value = h_val[k];
                            printf("\t(%d, %d)\t->\t%.4e\n", i+1, j+1, value);
                            break;
                        }
                    }
                }
            }
            free(h_row);
            free(h_col);
            free(h_val);
        }
#endif
        // compute update step ------------------------------------------------
        // solver_LS_with_RF();
        // NOTE(review): all three solver paths accumulate into
        // TIME_SOLVER_MKL_DSS — confirm that is the intended bucket.
        switch(H_LinearSolver){
            case MKL_DSS:
                start = GetTimer();
                hybrid_solver_MKL_DSS();
                timeTable[TIME_SOLVER_MKL_DSS] += GetTimer() - start;
                break;
            case Eigen_SparseLU:
                start = GetTimer();
                hybrid_eigen_sparseLU_solver();
                timeTable[TIME_SOLVER_MKL_DSS] += GetTimer() - start;
                break;
            case cuSolver:
                start = GetTimer();
                //linearSolverSp(H_NTESTS);
                solver_LS_with_RF();
                timeTable[TIME_SOLVER_MKL_DSS] += GetTimer() - start;
                break;
        }
#ifdef DEBUG
        for (int t = 0; t < H_NTESTS; t++)
        {
            double *h_val = (double*) malloc(sizeof(double) * length);
            cudaMemcpy(h_val, F + length * t, sizeof(double) * length, cudaMemcpyDeviceToHost);
            printf("dx[%d] = \n", t);
            for(int i = 0; i < length; i++){
                double value = h_val[i];
                printf("\t(%d)\t->\t%.4e\n", i+1, -value);
            }
            free(h_val);
        }
#endif
        start = GetTimer();
        // Apply the Newton step (now stored in F) to the voltage vector.
        for(int t = 0; t < H_NTESTS; t++)
        {
            hybrid_updateVoltage<<<BLOCKS((H_NPV + H_NPQ), H_THREADS), H_THREADS, 0, stream[t]>>>(
                t,
                device_pv,
                device_pq,
                V + t * H_NBUS,
                F + t * length);
        }
        cudaDeviceSynchronize();
        timeTable[TIME_UPDATEVOLTAGE] += GetTimer() - start;
#ifdef DEBUG
        checkCudaErrors(cudaDeviceSynchronize());
        for (int t = 0; t < H_NTESTS; t++)
        {
            cuDoubleComplex *h_V = (cuDoubleComplex*) malloc(sizeof(cuDoubleComplex) * H_NBUS);
            cudaMemcpy(h_V, V + H_NBUS * t, sizeof(cuDoubleComplex) * H_NBUS, cudaMemcpyDeviceToHost);
            printf("V[%d] = \n", t);
            for(int i = 0; i < H_NBUS; i++)
            {
                printf("\t[%d] -> %.4e %c %.4ei\n",i , h_V[i].x, ((h_V[i].y < 0) ? '-' : '+'), ((h_V[i].y < 0) ? -h_V[i].y : h_V[i].y));
            }
            free(h_V);
        }
#endif
        start = GetTimer();
        // Recompute the mismatch F with the updated voltages.
        for(int t = 0; t < H_NTESTS; t++)
        {
            hybrid_checkConvergence<<<BLOCKS((H_NPV + H_NPQ), H_THREADS), H_THREADS, 0, stream[t]>>>(
                t,
                device_buses,
                device_pv,
                device_pq,
                nnzYbus,
                csrRowPtrYbus,
                csrColIndYbus,
                csrValYbus + t * nnzYbus,
                V + t * H_NBUS,
                F + t * length);
        }
        checkCudaErrors(cudaDeviceSynchronize());
        timeTable[TIME_COMPUTE_POWER] += GetTimer() - start;
#ifdef DEBUG
        for (int t = 0; t < H_NTESTS; t++)
        {
            double *h_val = (double*) malloc(sizeof(double) * length);
            cudaMemcpy(h_val, F + length * t, sizeof(double) * length, cudaMemcpyDeviceToHost);
            printf("F[%d] = \n", t);
            for(int i = 0; i < length; i++){
                double value = h_val[i];
                printf("\t(%d)\t->\t%.4e\n", i+1, value);
            }
            free(h_val);
        }
#endif
        start = GetTimer();
        converged = true;
        checkCudaErrors(cudaMemcpy(h_F, F, sizeof(double) * length * H_NTESTS, cudaMemcpyDeviceToHost));
        for(int t = 0; t < H_NTESTS; t++)
        {
            err[t] = 0.0;
            for(int i = 0; i < length; i++)
            {
                // BUGFIX: fabs was missing here (the initial check above took
                // the absolute value), so any large NEGATIVE mismatch passed
                // the `< EPS` test and the iteration stopped early.
                err[t] = max(err[t], fabs(h_F[i + length * t]));
            }
            if (err[t] < EPS)
            {
                converged_test[t] = true;
            }
            else // if (err[t] < EPS)
            {
                converged_test[t] = false;
            }
            converged &= converged_test[t];
        }
        timeTable[TIME_CHECKCONVERGENCE] += GetTimer() - start;
    }
    free(h_F);
}
#endif /* NEWTONPF_CUH_ */
|
lr_CPM_paralelo.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define frand(M) (M*(((double)rand())/RAND_MAX))
#define N 2000000
double X[N];
double Y[N];
/* Half mean squared error of the linear model h(x) = t0 + t1*x over the
   nn samples (vx, vy): sum((h(x_k) - y_k)^2) / (2*nn). */
double cost (int nn, double vx[], double vy[], double t0, double t1)
{
    double acc = 0.0;
    int k;
    #pragma omp parallel for reduction (+:acc)
    for (k = 0; k < nn; k++)
    {
        double r = t0 + t1 * vx[k] - vy[k];  /* per-sample residual */
        acc += r * r;
    }
    return acc / (2 * nn);
}
/* Batch gradient descent for the linear model y ~ the0 + the1*x.
   Updates *the0/*the1 in place with learning rate alpha and returns the
   number of iterations performed. Stops when the cost change between two
   consecutive iterations drops to `tol` or below (five decimals). */
int gradientDescent (int nn, double vx[], double vy[], double alpha, double *the0, double *the1)
{
    double th0 = *the0, th1 = *the1;
    double rate = alpha / nn;      /* step size already divided by n */
    double tol = 0.000009;         /* five decimals */
    double prev, cur = 0.0;
    int steps = 0;
    do
    {
        double g0 = 0.0, g1 = 0.0; /* gradient components (unscaled) */
        #pragma omp parallel for reduction (+:g0,g1)
        for (int k = 0; k < nn; k++)
        {
            double r = th0 + th1 * vx[k] - vy[k];
            g0 += r;
            g1 += r * vx[k];
        }
        th0 -= g0 * rate;
        th1 -= g1 * rate;
        steps++;
        prev = cur;
        cur = cost(nn, vx, vy, th0, th1);
    }
    while (fabs(cur - prev) > tol);
    *the0 = th0;
    *the1 = th1;
    return steps;
}
/* Driver: generate N synthetic (x, y) samples, fit y ~ theta0 + theta1*x by
   batch gradient descent, and print the iteration count, the fitted
   parameters, and the final cost. */
int main()
{
    int i;
    double ct;
    double theta0=0, theta1=1;  /* initial parameter guess */
    srand(1);                   /* fixed seed -> reproducible data */
    for (i=0;i<N;i++)
    {
        X[i] = frand(13);
        /* NOTE(review): the target is quadratic in X (the trailing factor
           multiplies by X[i] a second time), so a linear fit can only
           approximate it — confirm this is intentional. */
        Y[i] = frand(9) + ((1.66 + (frand(0.9))) * X[i]) * X[i] ;
    }
    //for (i=0;i<N;i++) printf("%g %g\n",X[i],Y[i]);
    i=gradientDescent (N, X, Y, 0.01, &theta0, &theta1);
    ct=cost(N,X,Y,theta0,theta1);
    printf ("(%d) Theta; %g, %g cost: %g\n",i,theta0,theta1,ct);
    return(0);
}
|
for-21.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */
extern void bar(int);
/* Worksharing loop with schedule(nonmonotonic:dynamic, 2): chunks of two
   iterations handed out dynamically; the dg-final directives below check
   that this lowers to the GOMP_loop_nonmonotonic_dynamic_* entry points. */
void foo (int n)
{
  int i;
#pragma omp for schedule(nonmonotonic:dynamic, 2)
  for (i = 0; i < n; ++i)
    bar(i);
}
/* { dg-final { scan-tree-dump-times "GOMP_loop_nonmonotonic_dynamic_start" 1 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_nonmonotonic_dynamic_next" 1 "ompexp" } } */
|
thapi.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include "thnets.h"
static int lasterror;
static short TB_YUR[256], TB_YUB[256], TB_YUGU[256], TB_YUGV[256], TB_Y[256];
static unsigned char TB_SAT[1024 + 1024 + 256];
int th_debug, th_profile, th_minmax;
#ifdef CUDNN
int cuda_maphostmem;
#endif
#define BYTE2FLOAT 0.003921568f // 1/255
// Convert an interleaved (HWC) 8-bit RGB image into planar (CHW) floats,
// normalizing each channel as (byte/255 - mean[c]) / std[c].
// srcstride is the source row pitch in bytes; cp is the channel count (<= 3).
static void rgb2float(float *dst, const unsigned char *src, int width, int height, int srcstride, int cp, const float *mean, const float *std)
{
	int c, y, x;
	float invstd[3];
	for(c = 0; c < cp; c++)
		invstd[c] = 1 / std[c];	// multiply instead of dividing per pixel
	#pragma omp parallel for private(c, y, x)
	for(c = 0; c < cp; c++)
	{
		float *plane = dst + c * height * width;
		for(y = 0; y < height; y++)
		{
			const unsigned char *row = src + srcstride * y;
			for(x = 0; x < width; x++)
				plane[y * width + x] = (row[c + cp * x] * BYTE2FLOAT - mean[c]) * invstd[c];
		}
	}
}
// Same as rgb2float but the source bytes are in BGR order: the channel is
// read mirrored (cp-1-c) so that plane 0 of dst still receives R.
static void bgr2float(float *dst, const unsigned char *src, int width, int height, int srcstride, int cp, const float *mean, const float *std)
{
	int c, y, x;
	float invstd[3];
	for(c = 0; c < cp; c++)
		invstd[c] = 1 / std[c];	// multiply instead of dividing per pixel
	#pragma omp parallel for private(c, y, x)
	for(c = 0; c < cp; c++)
	{
		float *plane = dst + c * height * width;
		for(y = 0; y < height; y++)
		{
			const unsigned char *row = src + srcstride * y;
			for(x = 0; x < width; x++)
				plane[y * width + x] = (row[cp-1-c + cp * x] * BYTE2FLOAT - mean[c]) * invstd[c];
		}
	}
}
// Fill the fixed-point lookup tables used by yuyv2fRGB:
//  TB_Y      luma rescale,  TB_YUR/TB_YUB chroma contributions for R/B,
//  TB_YUGU/TB_YUGV          chroma contributions for G,
//  TB_SAT    clamp-to-[0,255] table with 1024 guard entries on each side.
static void init_yuv2rgb()
{
	int i;
	for (i = 0; i < 256; i++) {
		TB_Y[i]    = (i-16) * 298 / 256;
		TB_YUR[i]  = 459 * (i-128) / 256;
		TB_YUB[i]  = 541 * (i-128) / 256;
		TB_YUGU[i] = -137 * (i-128) / 256;
		TB_YUGV[i] = - 55 * (i-128) / 256;
	}
	/* saturation table: indices [0,1024) clamp low, [1024,1280) pass
	   through, [1280,2304) clamp high */
	for (i = 0; i < 1024; i++) {
		TB_SAT[i] = 0;
		TB_SAT[1024 + 256 + i] = 255;
	}
	for (i = 0; i < 256; i++)
		TB_SAT[1024 + i] = i;
}
// Convert a packed YUYV (YUY2) frame to planar normalized float RGB using the
// tables built by init_yuv2rgb. Each 4-byte group holds two pixels:
// src[0]=Y0, src[1]=U, src[2]=Y1, src[3]=V. Output layout: R plane at
// dst_float, G at +imgstride, B at +2*imgstride; rows are rowstride apart.
// Each value is normalized as (byte/255 - mean[c]) / std[c].
static void yuyv2fRGB(const unsigned char *frame, float *dst_float, int imgstride, int rowstride, int w, int h, const float *mean, const float *std)
{
	int i, j, w2 = w / 2, c;	// w2: pixel pairs per row (assumes even w)
	float std0 = 1/std[0];
	float std1 = 1/std[1];
	float std2 = 1/std[2];
	// One OpenMP task per output plane; each re-walks the source frame.
	#pragma omp parallel for private(c, i, j)
	for(c = 0; c < 3; c++)
	{
		float *dst;
		const unsigned char *src;
		if(c == 0)
		{
			/* convert for R channel: R = clamp(Y' + 1.79*V), via TB_YUR[V] */
			src = frame;
			for (i = 0; i < h; i++) {
				dst = dst_float + i * rowstride;
				for (j = 0; j < w2; j++) {
					*dst++ = (TB_SAT[ TB_Y[ src[0] ] + TB_YUR[ src[3] ] + 1024] * BYTE2FLOAT - mean[0]) * std0;
					*dst++ = (TB_SAT[ TB_Y[ src[2] ] + TB_YUR[ src[3] ] + 1024] * BYTE2FLOAT - mean[0]) * std0;
					src += 4;
				}
			}
		} else if(c == 1)
		{
			/* convert for G channel: subtracts both chroma contributions */
			src = frame;
			for (i = 0; i < h; i++) {
				dst = dst_float + i * rowstride + imgstride;
				for (j = 0; j < w2; j++) {
					*dst++ = (TB_SAT[ TB_Y[ src[0] ] + TB_YUGU[ src[1] ] + TB_YUGV[ src[3] ] + 1024] * BYTE2FLOAT - mean[1]) * std1;
					*dst++ = (TB_SAT[ TB_Y[ src[2] ] + TB_YUGU[ src[1] ] + TB_YUGV[ src[3] ] + 1024] * BYTE2FLOAT - mean[1]) * std1;
					src += 4;
				}
			}
		} else if(c == 2)
		{
			/* convert for B channel: B = clamp(Y' + 2.11*U), via TB_YUB[U] */
			src = frame;
			for (i = 0; i < h; i++) {
				dst = dst_float + i * rowstride + 2*imgstride;
				for (j = 0; j < w2; j++) {
					*dst++ = (TB_SAT[ TB_Y[ src[0] ] + TB_YUB[ src[1] ] + 1024] * BYTE2FLOAT - mean[2]) * std2;
					*dst++ = (TB_SAT[ TB_Y[ src[2] ] + TB_YUB[ src[1] ] + 1024] * BYTE2FLOAT - mean[2]) * std2;
					src += 4;
				}
			}
		}
	}
}
// Monotonic wall-clock time in seconds, measured from the first call
// (the first invocation returns 0.0 and establishes the origin).
double th_seconds()
{
	static double origin;
	double now;
#ifdef __MACH__
	struct timeval tv;
	struct timezone tz;
	gettimeofday(&tv, &tz);
	now = tv.tv_sec + tv.tv_usec * 1e-6;
#else
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	now = ts.tv_sec + ts.tv_nsec * 1e-9;
#endif
	if(!origin)
		origin = now;
	return now - origin;
}
// Scan every element of tensor `t` and write the smallest value to *min and
// the largest to *max. An empty tensor leaves *min = THInf, *max = -THInf.
void FindMinMax(THFloatTensor *t, float *min, float *max)
{
	float *p = THFloatTensor_data(t);
	long k, n = THFloatTensor_nElement(t);
	*min = THInf;
	*max = -THInf;
	for(k = 0; k < n; k++)
	{
		float v = p[k];
		if(v < *min)
			*min = v;
		if(v > *max)
			*max = v;
	}
}
double th_convtot, th_convflops;
// Run a forward pass of `net` on input `in` and return the final output
// tensor. Each module's updateOutput is invoked in order; optional global
// flags drive per-layer profiling (th_profile), min/max dumps (th_minmax)
// and shape tracing (th_debug). Intermediate outputs are freed as the pass
// advances (non-ONNX builds only).
THFloatTensor *forward(struct network *net, THFloatTensor *in)
{
	int i;
	double t = 0;
	th_convtot = 0;		// accumulated convolution time (profiling)
	th_convflops = 0;	// accumulated convolution FLOPs (profiling)
#ifdef OPENCL
	if(net->engine == ENGINE_OPENCL)
		OpenCL_Build(net, in);
#endif
	for(i = 0; i < net->nelem; i++)
	{
		if(th_profile)
			t = th_seconds();
#ifdef ONNX
		// In case of ONNX the network is not sequential, but each module has the list of inputs,
		// which are guaranteed to have been already calculated
		if(net->modules[i].ninputs == 1 && net->modules[i].type != MT_JoinTable)
			in = net->modules[i].updateOutput(&net->modules[i], net->modules[net->modules[i].inputs[0]].output);
		else if(net->modules[i].ninputs >= 1)
		{
			// Nodes with multiple inputs expect a module of type ConcatTable instead of THFloatTensor as their input
			struct module modules[net->modules[i].ninputs];
			struct network subnet;
			struct module m;
			int j;
			for(j = 0; j < net->modules[i].ninputs; j++)
				modules[j].output = net->modules[net->modules[i].inputs[j]].output;
			subnet.nelem = net->modules[i].ninputs;
			subnet.modules = modules;
			subnet.engine = net->engine;
			m.ConcatTable.net = &subnet;
			// The module reads the inputs through a synthetic ConcatTable.
			in = net->modules[i].updateOutput(&net->modules[i], (THFloatTensor *)&m);
		} else
#endif
			in = net->modules[i].updateOutput(&net->modules[i], in);
		// You can remove these lines if you don't have problems with memory
		// These lines free intermediate results
		if(th_minmax)
		{
			float min, max;
			FindMinMax(in, &min, &max);
			printf("Layer %d output: min=%f, max=%f\n", i+1, min, max);
		}
#ifndef ONNX
		// In case of ONNX we cannot free an output, as we can still need it
		if(i > 0)
		{
			THFloatTensor_free(net->modules[i-1].output);
			net->modules[i-1].output = THFloatTensor_new();
		}
#endif
		if(th_profile)
		{
#ifdef OPENCL
			if(net->engine == ENGINE_OPENCLINIT)
				clFinish(cl_queue);
#endif
			t = th_seconds() - t;
			if(net->modules[i].type == MT_SpatialConvolutionMM ||
				net->modules[i].type == MT_SpatialConvolutionVirtMM ||
				net->modules[i].type == MT_SpatialConvolution)
			{
				// 2 * outputs * inputPlanes * kW * kH multiply-adds
				double flops = 2.0 * THFloatTensor_nElement(in) * net->modules[i].SpatialConvolution.nInputPlane *
					net->modules[i].SpatialConvolution.kW * net->modules[i].SpatialConvolution.kH;
				printf("%f seconds for module %d, %f Gflops/s\n", t, i+1, flops * 1e-9 / t);
				th_convtot += t;
				th_convflops += flops;
			} else printf("%f seconds for module %d\n", t, i+1);
		}
		if(th_debug > 1)
			printf("%d) %d %d %ld %ld %ld %ld\n", i+1, net->modules[i].type, in->nDimension, in->size[0], in->size[1], in->size[2], in->size[3]);
	}
	if(th_profile)
		printf("%f seconds for convolutions %f Gflops/s\n", th_convtot, th_convflops * 1e-9 / th_convtot);
	return in;
}
// Forward-pass entry point: dispatch to the pytorch graph executor when the
// network was loaded from a pytorch model, else to the sequential engine.
THFloatTensor *THForward(THNETWORK *net, THFloatTensor *in)
{
	if(!net->pynet)
		return forward(net->net, in);
	return forward_pytorch(net->pynet, in, net->allpynodes);
}
// Case-insensitive "does s end with suffix?". Safe for strings shorter than
// the suffix (the previous inline `path + strlen(path) - N` arithmetic read
// before the start of short paths — undefined behavior).
static int th_path_has_suffix(const char *s, const char *suffix)
{
	size_t ls = strlen(s), lsuf = strlen(suffix);
	return ls >= lsuf && !strcasecmp(s + ls - lsuf, suffix);
}
// Load a network from `path`, trying the supported formats in order:
// ONNX (by file extension, when compiled in), pytorch (the file itself or
// <path>/pymodel.net), then torch (<path>/model.net with optional
// <path>/stat.t7 holding per-channel mean/std). Returns NULL on failure and
// records the reason for THLastError(). The caller owns the result
// (release with THFreeNetwork).
THNETWORK *THLoadNetwork(const char *path)
{
	char tmppath[255];
	int i, longsize = 8;	// torch serialized "long" width; retried as 4
	THNETWORK *net;
	net = calloc(1, sizeof(*net));
	if(!net)	// allocation failure: report and bail instead of crashing
	{
		lasterror = ERR_WRONGOBJECT;
		return 0;
	}
	net->std[0] = net->std[1] = net->std[2] = 1;
	net->mean[0] = net->mean[1] = net->mean[2] = 0;
	// Try ONNX
#ifdef ONNX
	if(th_path_has_suffix(path, ".pb") || th_path_has_suffix(path, ".proto") ||
		th_path_has_suffix(path, ".onnx"))
	{
		net->net = loadonnx(path);
		if(net->net)
			return net;
	}
#endif
	// Try pytorch
	net->allpynodes = calloc(MAXPYNODES, sizeof(*net->allpynodes));
	net->pynet = loadpytorch(path, net->allpynodes);
	if(net->pynet)
		return net;
	// snprintf: tmppath is only 255 bytes, sprintf could overflow on long paths
	snprintf(tmppath, sizeof tmppath, "%s/pymodel.net", path);
	net->pynet = loadpytorch(tmppath, net->allpynodes);
	if(net->pynet)
		return net;
	free(net->allpynodes);
	net->allpynodes = 0;
	// Try torch
	snprintf(tmppath, sizeof tmppath, "%s/model.net", path);
	net->netobj = malloc(sizeof(*net->netobj));
	lasterror = loadtorch(tmppath, net->netobj, longsize);
	if(lasterror == ERR_CORRUPTED)	// retry assuming 4-byte longs
		lasterror = loadtorch(tmppath, net->netobj, longsize = 4);
	if(lasterror)
	{
		free(net->netobj);
		free(net);
		return 0;
	}
	if(th_debug)
		printobject(net->netobj, 0);
	if(net->netobj->type != TYPE_NNMODULE)
	{
		free(net->netobj);
		free(net);
		return 0;
	}
	net->net = Module2Network(net->netobj->nnmodule);
	if(!net->net)
	{
		lasterror = ERR_WRONGOBJECT;
		freeobject(net->netobj);
		free(net->netobj);
		free(net);
		return 0;
	}
	// Optional normalization statistics: a 2-entry table with "mean"/"std".
	snprintf(tmppath, sizeof tmppath, "%s/stat.t7", path);
	net->statobj = malloc(sizeof(*net->statobj));
	lasterror = loadtorch(tmppath, net->statobj, longsize);
	if(!lasterror)
	{
		if(net->statobj->type != TYPE_TABLE || net->statobj->table->nelem != 2)
		{
			lasterror = ERR_WRONGOBJECT;
			freenetwork(net->net);
			freeobject(net->netobj);
			free(net->netobj);
			freeobject(net->statobj);
			free(net->statobj);
			free(net);
			return 0;
		}
		for(i = 0; i < net->statobj->table->nelem; i++)
			if(net->statobj->table->records[i].name.type == TYPE_STRING)
			{
				if(!strcmp(net->statobj->table->records[i].name.string.data, "mean"))
					memcpy(net->mean, net->statobj->table->records[i].value.tensor->storage->data, sizeof(net->mean));
				else if(!strcmp(net->statobj->table->records[i].name.string.data, "std"))
					memcpy(net->std, net->statobj->table->records[i].value.tensor->storage->data, sizeof(net->std));
			}
	} else {
		// stat.t7 is optional: missing statistics are not an error.
		free(net->statobj);
		net->statobj = 0;
	}
	THUseSpatialConvolutionMM(net, 2);
	return net;
}
// One-time library initialization (idempotent): builds the YUV lookup
// tables, initializes the built-in BLAS, and probes CUDA/OpenCL when
// compiled in. Safe to call multiple times.
void THInit()
{
	static int init;	// guards against repeated initialization
	if(init)
		return;
	init_yuv2rgb();
#ifndef USEBLAS
	blas_init();
#endif
	init = 1;
#if defined CUDNN && defined USECUDAHOSTALLOC
	// cuda_maphostmem = 1 requires that memory was allocated with cudaHostAlloc
	// cuda_maphostmem = 2 will work with malloc, but Tegra TX1 does not support cudaHostRegister with cudaHostRegisterMapped
	struct cudaDeviceProp prop;
	cudaGetDeviceProperties(&prop, 0);
	if(prop.canMapHostMemory)
	{
		errcheck(cudaSetDeviceFlags(cudaDeviceMapHost));
		cuda_maphostmem = 1;
	}
#endif
#ifdef OPENCL
	thopencl_init();
#endif
}
// Run the network on a caller-provided float buffer of `batchsize` images
// (nplanes x height x width each). The buffer is normalized IN PLACE with
// the network's mean/std (the caller's data is modified), wrapped in a
// tensor without copying, and forwarded. *result points into storage owned
// by the network/output tensor; returns the number of output elements and
// the spatial output size via *outwidth/*outheight.
int THProcessFloat(THNETWORK *network, float *data, int batchsize, int width, int height, int nplanes, float **result, int *outwidth, int *outheight)
{
	int b, c, i;
	THFloatTensor *t = THFloatTensor_new();
	THFloatTensor *out;
	t->nDimension = 4;
	t->size[0] = batchsize;
	t->size[1] = nplanes;
	t->size[2] = height;
	t->size[3] = width;
#ifdef USEQSML
	// QSML builds expect channel-minor (HWC-like) layout.
	t->stride[0] = nplanes * width * height;//batch
	t->stride[1] = 1;//plane
	t->stride[2] = nplanes * width;//row
	t->stride[3] = nplanes;//col
#else
	// Default planar (NCHW) layout.
	t->stride[0] = nplanes * width * height;//batch
	t->stride[1] = width * height;//plane
	t->stride[2] = width;//row
	t->stride[3] = 1;//col
#endif
	// Wrap the caller's buffer; no copy is made.
	t->storage = THFloatStorage_newwithbuffer((float *)data);
	if(t->stride[1] == 1){//row major-plane minor
		#pragma omp parallel for private(b, i, c)
		for(b = 0; b < batchsize; b++)
			for(i = 0; i < width*height; i++)
				for(c = 0; c < nplanes; c++)
					data[b * t->stride[0] + c + i * t->stride[3]] =
						(data[b * t->stride[0] + c + i * t->stride[3]] - network->mean[c]) / network->std[c];
	}
	else{//plane major
		#pragma omp parallel for private(b, c, i)
		for(b = 0; b < batchsize; b++)
			for(c = 0; c < nplanes; c++)
				for(i = 0; i < width*height; i++)
					data[b * t->stride[0] + c * t->stride[1] + i] =
						(data[b * t->stride[0] + c * t->stride[1] + i] - network->mean[c]) / network->std[c];
	}
	// Engine-specific forward: convert the input tensor to the engine's
	// format, run, and convert the output back to a host float tensor that
	// the network object keeps ownership of (network->out).
#ifdef CUDNN
	if(network->net->engine == ENGINE_CUDA)
	{
		THFloatTensor *t2 = THCudaTensor_newFromFloatTensor(t);
		out = THForward(network, t2);
		THFloatTensor_free(t2);
		if(network->out)
			THFloatTensor_free(network->out);
		network->out = THFloatTensor_newFromCudaTensor(out);
		out = network->out;
	} else
#endif
#ifdef OPENCL
	if(network->net->engine == ENGINE_OPENCL || network->net->engine == ENGINE_OPENCLINIT)
	{
		THFloatTensor *t2 = THOpenCLTensor_newFromImageTensor(t);
		out = THForward(network, t2);
		THFloatTensor_free(t2);
		if(network->out)
			THFloatTensor_free(network->out);
		network->out = THFloatTensor_newFromOpenCLImageTensor(out);
		out = network->out;
	} else
#endif
#ifdef LOWP
	if(network->net->engine == ENGINE_LOWP)
	{
		THFloatTensor *t2 = THLowpTensor_newFromFloatTensor(t);
		out = THForward(network, t2);
		THFloatTensor_free(t2);
		if(network->out)
			THFloatTensor_free(network->out);
		network->out = THFloatTensor_newFromLowpTensor(out);
		out = network->out;
	} else
#endif
		out = THForward(network, t);
	THFloatTensor_free(t);
	*result = out->storage->data;
	if(out->nDimension >= 3)
	{
		*outwidth = (int)out->size[out->nDimension - 1];
		*outheight = (int)out->size[out->nDimension - 2];
	} else *outwidth = *outheight = 1;
	return (int)THFloatTensor_nElement(out);
}
// Run the network on a batch of 8-bit images. Each image is converted to
// normalized planar floats (RGB or BGR byte order per `bgr`) in an
// engine-appropriate staging buffer, forwarded, and the output converted
// back to a host float tensor owned by the network (network->out).
// *results points into that output storage; returns the element count and
// the spatial output size via *outwidth/*outheight.
int THProcessImages(THNETWORK *network, unsigned char **images, int batchsize, int width, int height, int stride, float **results, int *outwidth, int *outheight, int bgr)
{
	int i, cp = 3;
	THFloatTensor *out, *t = 0;
	THFloatStorage *st;
	if(stride < width*3)
		cp = 1; // Guess color planes, if stride is less than 3*width, it cannot be 3 color planes, so assume grayscale
#ifdef CUDNN
	if(network->net->engine == ENGINE_CUDA)
	{
#ifdef HAVEFP16
		if(floattype == CUDNN_DATA_HALF)
		{
			// Convert straight into half-precision device storage.
			st = THCudaStorage_new(batchsize * (width * height * cp));
			for(i = 0; i < batchsize; i++)
				cuda_rgb2half((unsigned short *)st->data + i * (width * height * cp), images[i], width, height, stride, network->mean, network->std, bgr);
		} else
#endif
		{
			st = THCudaStorage_new(batchsize * width * height * cp);
			for(i = 0; i < batchsize; i++)
				cuda_rgb2float(st->data + i * width * height * cp, images[i], width, height, stride, network->mean, network->std, bgr);
		}
	} else
#endif
#ifdef OPENCL
	// NOTE(review): the OpenCL path only loads images[0] — batches beyond
	// the first image appear unsupported here; confirm.
	if(network->net->engine == ENGINE_OPENCL || network->net->engine == ENGINE_OPENCLINIT)
		t = OpenCL_LoadImage(images[0], width, height, stride, network->mean, network->std, bgr);
	else
#endif
#ifdef LOWP
	if(network->net->engine == ENGINE_LOWP)
		t = Lowp_LoadImages(images, batchsize, width, height, stride, network->mean, network->std, bgr);
	else
#endif
	{
		// CPU path: normalize into a host float storage.
		st = THFloatStorage_new(batchsize * width * height * cp);
		if(bgr)
			#pragma omp parallel for if(batchsize>1) private(i)
			for(i = 0; i < batchsize; i++)
				bgr2float(st->data + i * width * height * cp, images[i], width, height, stride, cp, network->mean, network->std);
		else
			#pragma omp parallel for if(batchsize>1) private(i)
			for(i = 0; i < batchsize; i++)
				rgb2float(st->data + i * width * height * cp, images[i], width, height, stride, cp, network->mean, network->std);
	}
	// Engines that did not already build a tensor (t == 0) get a planar
	// CHW (batchsize == 1) or NCHW wrapper around the staging storage.
	if(!t)
	{
		t = THFloatTensor_new();
		t->storage = st;
		if(batchsize == 1)
		{
			t->nDimension = 3;
			t->size[0] = cp;
			t->size[1] = height;
			t->size[2] = width;
			t->stride[0] = width * height;
			t->stride[1] = width;
			t->stride[2] = 1;
		} else {
			t->nDimension = 4;
			t->size[0] = batchsize;
			t->size[1] = cp;
			t->size[2] = height;
			t->size[3] = width;
			t->stride[0] = cp * width * height;
			t->stride[1] = width * height;
			t->stride[2] = width;
			t->stride[3] = 1;
		}
	}
	// Forward and convert the engine-specific output back to host floats.
#ifdef CUDNN
	if(network->net->engine == ENGINE_CUDA)
	{
		out = THForward(network, t);
		if(network->out)
			THFloatTensor_free(network->out);
#ifdef HAVEFP16
		if(floattype == CUDNN_DATA_HALF)
			network->out = THFloatTensor_newFromHalfCudaTensor(out);
		else
#endif
			network->out = THFloatTensor_newFromCudaTensor(out);
		out = network->out;
	} else
#endif
#ifdef OPENCL
	if(network->net->engine == ENGINE_OPENCL || network->net->engine == ENGINE_OPENCLINIT)
	{
		out = THForward(network, t);
		if(network->out)
			THFloatTensor_free(network->out);
#ifdef HAVEFP16
		if(cl_datasize == 2)
			network->out = THFloatTensor_newFromHalfOpenCLImageTensor(out);
		else
#endif
			network->out = THFloatTensor_newFromOpenCLImageTensor(out);
		out = network->out;
	} else
#endif
#ifdef LOWP
	if(network->net->engine == ENGINE_LOWP)
	{
		out = THForward(network, t);
		if(network->out)
			THFloatTensor_free(network->out);
		network->out = THFloatTensor_newFromLowpTensor(out);
		out = network->out;
	} else
#endif
		out = THForward(network, t);
	THFloatTensor_free(t);
	*results = out->storage->data;
	if(out->nDimension >= 3)
	{
		*outwidth = (int)out->size[out->nDimension - 1];
		*outheight = (int)out->size[out->nDimension - 2];
	} else *outwidth = *outheight = 1;
	return (int)THFloatTensor_nElement(out);
}
// Run the network on a single packed YUYV frame: converts it to normalized
// planar float RGB via yuyv2fRGB and forwards it. Only the CPU engine is
// supported — the CUDA/OpenCL/Lowp engines raise THError. *results points
// into the output tensor's storage; returns the element count and spatial
// size via *outwidth/*outheight.
int THProcessYUYV(THNETWORK *network, unsigned char *image, int width, int height, float **results, int *outwidth, int *outheight)
{
	THFloatTensor *out;
	THFloatStorage *st;
#ifdef CUDNN
	if(network->net->engine == ENGINE_CUDA)
		THError("This function is not supported with CUDNN");
#endif
#ifdef OPENCL
	if(network->net->engine == ENGINE_OPENCL || network->net->engine == ENGINE_OPENCLINIT)
		THError("This function is not supported with OpenCL");
#endif
#ifdef LOWP
	if(network->net->engine == ENGINE_LOWP)
		THError("This function is not supported with Lowp");
#endif
	st = THFloatStorage_new(width * height * 3);
	// imgstride = plane size, rowstride = width (tightly packed planes).
	yuyv2fRGB(image, st->data, width*height, width, width, height, network->mean, network->std);
	THFloatTensor *t = THFloatTensor_new();
	t->storage = st;
	t->nDimension = 3;
	t->size[0] = 3;
	t->size[1] = height;
	t->size[2] = width;
	t->stride[0] = width * height;
	t->stride[1] = width;
	t->stride[2] = 1;
	out = THForward(network, t);
	THFloatTensor_free(t);
	*results = out->storage->data;
	if(out->nDimension >= 3)
	{
		*outwidth = (int)out->size[out->nDimension - 1];
		*outheight = (int)out->size[out->nDimension - 2];
	} else *outwidth = *outheight = 1;
	return (int)THFloatTensor_nElement(out);
}
// Release a network and everything it owns: pytorch nodes/graph, the module
// network, loaded torch objects, the cached output tensor, and finally the
// THNETWORK struct itself.
void THFreeNetwork(THNETWORK *network)
{
	free(network->allpynodes);	/* free(NULL) is a no-op */
	if(network->pynet)
		freepynet(network->pynet);
	if(network->net)
		freenetwork(network->net);
	if(network->netobj)
	{
		freeobject(network->netobj);
		free(network->netobj);
	}
	if(network->statobj)
	{
		freeobject(network->statobj);
		free(network->statobj);
	}
	if(network->out)
		THFloatTensor_free(network->out);
	free(network);
}
// Return the status recorded by the most recent load operation
// (0 on success, an ERR_* code otherwise).
int THLastError()
{
	return lasterror;
}
// Convert a classification network into a fully-convolutional one so it can
// run on inputs larger than `size` x `size`: View/Reshape modules are
// removed and Linear layers are rewritten as convolutions whose kernel
// covers the whole remaining spatial extent. `size` tracks the spatial
// resolution flowing through the network; nInputPlane tracks channels
// (starts at 3 for RGB input).
void THMakeSpatial(THNETWORK *network, int size)
{
	int i, nInputPlane = 3;
	for(i = 0; i < network->net->nelem; i++)
	{
		if(network->net->modules[i].type == MT_View || network->net->modules[i].type == MT_Reshape)
		{
			// Drop the flattening module and close the gap in the array.
			THFloatTensor_free(network->net->modules[i].output);
			memmove(network->net->modules+i, network->net->modules+i+1, sizeof(*network->net->modules) * (network->net->nelem - i - 1));
			network->net->nelem--;
			i--;	// re-examine the module shifted into slot i
		} else if(network->net->modules[i].type == MT_Linear)
		{
			// Reinterpret the Linear weights as a size x size convolution.
			THFloatTensor_free(network->net->modules[i].Linear.addBuffer);
			network->net->modules[i].updateOutput = nn_SpatialConvolutionMM_updateOutput;
#ifndef USEBLAS
			network->net->modules[i].type = MT_SpatialConvolutionVirtMM;
#else
			network->net->modules[i].type = MT_SpatialConvolutionMM;
#endif
			struct SpatialConvolution *c = &network->net->modules[i].SpatialConvolution;
			c->finput = THFloatTensor_new();
			c->padW = c->padH = 0;
			c->dW = c->dH = 1;
			c->kW = c->kH = size;
			c->nInputPlane = nInputPlane;
			nInputPlane = c->nOutputPlane = (int)c->weight->size[0];
			size = (size + 2*c->padW - c->kW) / c->dW + 1;	// collapses to 1x1
		} else if(network->net->modules[i].type == MT_SpatialConvolution ||
			network->net->modules[i].type == MT_SpatialConvolutionMM ||
			network->net->modules[i].type == MT_SpatialConvolutionVirtMM)
		{
			// Standard convolution output-size formula.
			struct SpatialConvolution *c = &network->net->modules[i].SpatialConvolution;
			size = (size + 2*c->padW - c->kW) / c->dW + 1;
			nInputPlane = network->net->modules[i].SpatialConvolution.nOutputPlane;
		} else if(network->net->modules[i].type == MT_SpatialMaxPooling)
		{
			if(c->ceil_mode)
				size = (ceil((float)(size - c->kH + 2*c->padH) / c->dH)) + 1;
			else size = (floor((float)(size - c->kH + 2*c->padH) / c->dH)) + 1;
		} else if(network->net->modules[i].type == MT_SpatialZeroPadding)
		{
			struct SpatialZeroPadding *c = &network->net->modules[i].SpatialZeroPadding;
			size += c->pad_l + c->pad_r;
		}
	}
}
// Switch every convolution module between implementations:
//   mm_type 0 -> direct SpatialConvolution (refused for padded convs),
//   mm_type 1 -> GEMM-based SpatialConvolutionMM,
//   mm_type 2 -> virtual-GEMM variant (only without an external BLAS).
// Weight tensors are reshaped to match (2D for MM, 4D for direct).
// Returns 0, or ERR_NOTIMPLEMENTED when some module could not be converted.
int THUseSpatialConvolutionMM(THNETWORK *network, int mm_type)
{
	int i;
	int rc = 0;
	if(!network->net)
		return rc = ERR_NOTIMPLEMENTED;	// ONNX/pytorch graphs not handled here
	for(i = 0; i < network->net->nelem; i++)
	{
		if(mm_type && network->net->modules[i].type == MT_SpatialConvolution)
		{
			struct SpatialConvolution *c = &network->net->modules[i].SpatialConvolution;
			network->net->modules[i].type = MT_SpatialConvolutionMM;
			network->net->modules[i].updateOutput = nn_SpatialConvolutionMM_updateOutput;
			// MM layout: (nOutputPlane) x (nInputPlane*kH*kW)
			THFloatTensor_resize2d(c->weight, c->nOutputPlane, c->nInputPlane * c->kH * c->kW);
		} else if(!mm_type && (network->net->modules[i].type == MT_SpatialConvolutionMM ||
			network->net->modules[i].type == MT_SpatialConvolutionVirtMM))
		{
			struct SpatialConvolution *c = &network->net->modules[i].SpatialConvolution;
			if(c->padW || c->padH)
			{
				// Direct convolution has no padding support; leave as MM.
				rc = ERR_NOTIMPLEMENTED;
				continue;
			}
			network->net->modules[i].type = MT_SpatialConvolution;
			network->net->modules[i].updateOutput = nn_SpatialConvolution_updateOutput;
			THFloatTensor_resize4d(c->weight, c->nOutputPlane, c->nInputPlane, c->kH, c->kW);
		}
#ifndef USEBLAS
		// Without an external BLAS, toggle between the MM and VirtMM variants.
		if(mm_type == 2 && network->net->modules[i].type == MT_SpatialConvolutionMM)
			network->net->modules[i].type = MT_SpatialConvolutionVirtMM;
		else if(mm_type == 1 && network->net->modules[i].type == MT_SpatialConvolutionVirtMM)
			network->net->modules[i].type = MT_SpatialConvolutionMM;
#endif
	}
	return rc;
}
// Clone `net` as a CUDA/cuDNN network. The clone shares no torch objects
// (netobj/statobj are cleared) and gets its own cuDNN module graph.
// Returns NULL when the library was built without CUDNN.
THNETWORK *THCreateCudaNetwork(THNETWORK *net)
{
#ifdef CUDNN
	THNETWORK *clone = malloc(sizeof *clone);
	memcpy(clone, net, sizeof *clone);
	clone->netobj = 0;
	clone->statobj = 0;
	clone->net = THcudnn_ToCUDNN(net->net);
	return clone;
#else
	return 0;
#endif
}
// Select the CUDA compute precision: half when `enable` is non-zero, float
// otherwise. Returns 0, or ERR_NOTIMPLEMENTED when built without
// CUDNN/FP16 support.
int THCudaHalfFloat(int enable)
{
#if defined CUDNN && defined HAVEFP16
	if(enable)
	{
		floattype = CUDNN_DATA_HALF;
	} else floattype = CUDNN_DATA_FLOAT;
	return 0;
#else
	return ERR_NOTIMPLEMENTED;
#endif
}
// Select the OpenCL data width: 2-byte half when `enable` is non-zero,
// 4-byte float otherwise. Returns 0, or ERR_NOTIMPLEMENTED when built
// without OpenCL/FP16 support.
int THOpenCLHalfFloat(int enable)
{
#if defined OPENCL && defined HAVEFP16
	if(enable)
	{
		cl_datasize = 2;
	} else cl_datasize = 4;
	return 0;
#else
	return ERR_NOTIMPLEMENTED;
#endif
}
// Clone `net` as an OpenCL network. The clone shares no torch objects
// (netobj/statobj are cleared) and gets its own OpenCL module graph.
// Returns NULL when the library was built without OpenCL.
THNETWORK *THCreateOpenCLNetwork(THNETWORK *net)
{
#ifdef OPENCL
	THNETWORK *clone = malloc(sizeof *clone);
	memcpy(clone, net, sizeof *clone);
	clone->netobj = 0;
	clone->statobj = 0;
	clone->net = THOpenCL_ToOpenCL(net->net);
	return clone;
#else
	return 0;
#endif
}
/* Clone 'net' as a low-precision network quantized over 'range'.
 *
 * Returns a newly allocated THNETWORK whose internal net has been converted
 * with THLowp_ToLowp, or 0 when LOWP support is not compiled in or the
 * allocation fails.  The caller owns the returned network.
 */
THNETWORK *THCreateLowpNetwork(THNETWORK *net, float range)
{
#ifdef LOWP
	THNETWORK *nn = malloc(sizeof *nn);
	if (!nn)
		return 0;	/* bug fix: malloc result was dereferenced unchecked */
	memcpy(nn, net, sizeof *nn);
	nn->netobj = 0;
	nn->statobj = 0;
	nn->net = THLowp_ToLowp(net->net, range);
	return nn;
#else
	return 0;
#endif
}
|
SceneObject.h | // Copyright 2021 The Khronos Group
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include "../Array.h"
#include "../util/Span.h"
// bvh
#include "bvh/BVH.h"
#include "bvh/Primitive.h"
#include "bvh/RayTraverser.h"
#include "bvh/SweepSAHBuilder.h"
namespace anari {
namespace example_device {
// Base class for renderable scene objects: caches a world-space bounding
// box and exposes it through the ANARI "bounds" property.
struct SceneObject : public Object
{
  SceneObject() = default;

  // Returns the cached bounds previously stored via setBounds().
  box3 bounds() const override;

  // Serves the "bounds" property (ANARI_FLOAT32_BOX3 only); all other
  // queries are delegated to Object::getProperty().
  bool getProperty(const std::string &name,
      ANARIDataType type,
      void *ptr,
      ANARIWaitMask m) override;

  // Ray/object intersection test; the meaning of 'prim' is defined by
  // the concrete subclass.
  virtual PotentialHit intersect(const Ray &ray, size_t prim) const = 0;

 protected:
  // Stores 'b' as the cached bounds returned by bounds()/getProperty().
  void setBounds(const box3 &b);

  box3 m_bounds;  // cached world-space bounds
};
// Mixin that adds BVH-accelerated intersection to a SceneObject subclass.
// PRIM_T must provide a bounds() method (used when building the BVH).
// The primitive array is referenced, not owned — it must outlive this
// object (assumption based on the raw pointer member; confirm at call sites).
template <typename PRIM_T, typename BASE_CLASS_T = SceneObject>
struct IntersectableObject : public BASE_CLASS_T
{
  using prim_t = PRIM_T;

  IntersectableObject() = default;

  // Traverses the BVH built by buildBVH(); returns no hit when the BVH
  // is empty.
  PotentialHit intersect(const Ray &ray, size_t prim) const override;

 protected:
  // Builds the BVH over 'primitives' and updates this object's bounds.
  void buildBVH(const PRIM_T *primitives, size_t numPrims);

  BVH m_bvh;
  const PRIM_T *m_primitives{nullptr};  // non-owning view of the primitives
  size_t m_numPrims{0};                 // number of entries in m_primitives
};
// Thin non-owning wrapper that forwards intersect()/bounds() to an object
// pointer.  m_obj must be non-null and outlive the wrapper (not checked).
template <typename OBJECT_T>
struct IntersectablePtr
{
  OBJECT_T *m_obj;

  PotentialHit intersect(const Ray &ray, size_t prim) const;
  box3 bounds() const;
};
// Helper functions ///////////////////////////////////////////////////////////
template <typename Primitive>
std::pair<std::vector<box3>, std::vector<vec3>>
compute_bounding_boxes_and_centers(const Primitive *p, size_t nPrims)
{
auto prim_bounds = std::vector<box3>(nPrims);
auto centers = std::vector<vec3>(nPrims);
#pragma omp parallel for
for (size_t i = 0; i < nPrims; ++i) {
const auto b = p[i].bounds();
prim_bounds[i] = b;
centers[i] = center(b);
}
return std::make_pair(std::move(prim_bounds), std::move(centers));
}
// Returns the union of 'count' bounding boxes, computed in parallel with a
// user-declared OpenMP reduction (each thread folds into a private box3
// via extend(), and the privates are merged at the end).
inline box3 compute_bounds_union(const box3 *bboxes, size_t count)
{
  auto bbox = box3();
#pragma omp declare reduction(bbox_extend:box3 \
    : omp_out.extend(omp_in)) \
    initializer(omp_priv = box3())
#pragma omp parallel for reduction(bbox_extend : bbox)
  for (size_t i = 0; i < count; ++i)
    bbox.extend(bboxes[i]);
  return bbox;
}
// Inlined definitions ////////////////////////////////////////////////////////
// SceneObject //
// Accessor for the cached world-space bounds.
inline box3 SceneObject::bounds() const
{
  return m_bounds;
}
// Updates the cached bounds returned by bounds() and getProperty("bounds").
inline void SceneObject::setBounds(const box3 &b)
{
  m_bounds = b;
}
// Serves the "bounds" property by copying the cached box into 'ptr';
// every other query is forwarded to the base Object implementation.
inline bool SceneObject::getProperty(
    const std::string &name, ANARIDataType type, void *ptr, ANARIWaitMask mask)
{
  const bool wantsBounds = (name == "bounds") && (type == ANARI_FLOAT32_BOX3);
  if (!wantsBounds)
    return Object::getProperty(name, type, ptr, mask);

  std::memcpy(ptr, &m_bounds, sizeof(m_bounds));
  return true;
}
// IntersectableObject //
// Rebuilds the BVH over 'primitives' (sweep-SAH builder) and refreshes the
// object's cached bounds.  With no input the BVH is cleared and the bounds
// reset to an empty box.
template <typename PRIM_T, typename BASE_CLASS_T>
inline void IntersectableObject<PRIM_T, BASE_CLASS_T>::buildBVH(
    const PRIM_T *primitives, size_t numPrims)
{
  m_bvh = BVH();

  const bool haveInput = primitives != nullptr && numPrims != 0;
  if (!haveInput) {
    SceneObject::setBounds(box3());
    return;
  }

  m_primitives = primitives;
  m_numPrims = numPrims;

  SweepSAHBuilder builder(m_bvh);
  auto boxesAndCenters =
      compute_bounding_boxes_and_centers(primitives, numPrims);
  auto &primBoxes = boxesAndCenters.first;
  auto &primCenters = boxesAndCenters.second;
  const box3 worldBounds = compute_bounds_union(primBoxes.data(), numPrims);
  builder.build(worldBounds, primBoxes.data(), primCenters.data(), numPrims);
  SceneObject::setBounds(worldBounds);
}
// Intersects 'ray' against the BVH (the primitive index parameter is
// unused here — traversal visits all primitives).  Returns no hit when
// the BVH has not been built.
template <typename PRIM_T, typename BASE_CLASS_T>
inline PotentialHit IntersectableObject<PRIM_T, BASE_CLASS_T>::intersect(
    const Ray &ray, size_t) const
{
  const bool haveBVH = !m_bvh.nodes.empty();
  if (!haveBVH)
    return std::nullopt;

  SingleRayTraverser walker(m_bvh, m_primitives);
  return walker.intersect(ray);
}
// IntersectablePtr //
// Forwards the intersection query to the wrapped object.
template <typename OBJECT_T>
inline PotentialHit IntersectablePtr<OBJECT_T>::intersect(
    const Ray &ray, size_t prim) const
{
  return m_obj->intersect(ray, prim);
}
// Forwards the bounds query to the wrapped object.
template <typename OBJECT_T>
inline box3 IntersectablePtr<OBJECT_T>::bounds() const
{
  return m_obj->bounds();
}
} // namespace example_device
} // namespace anari |
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is normalized in place (classic glibc-manual idiom), so the
 * caller must not rely on its value afterwards.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
	if (x->tv_usec < y->tv_usec)
	{
		int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * borrow;
		y->tv_sec += borrow;
	}

	/* Carry excess microseconds into y's seconds field. */
	if (x->tv_usec - y->tv_usec > 1000000)
	{
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}

	/* After normalization tv_usec is guaranteed non-negative. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;

	return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the tiled, OpenMP-parallel order-1 3D 7-point stencil.
 * Usage: prog [Nx Ny Nz [Nt]] — interior sizes; a halo of 2 is added.
 * Runs the kernel TESTS times and reports the minimum wall-clock time.
 */
int main(int argc, char *argv[])
{
  int i, j, k, test;
  /* Bug fix: Nx/Ny/Nz/Nt were previously read uninitialized (undefined
   * behavior) when fewer than 3 (resp. 4) arguments were supplied; they
   * now default to a small 32^3 interior run over 10 timesteps. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A[2][Nz][Ny][Nx]: double-buffered grid; the first index is the time
   * parity (t%2 is read, (t+1)%2 is written). */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  /* Tile size list for the source-to-source transformation; terminated
   * by -1 (the extra element decides the list length). */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 4;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  /* timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  /* Initialize the grid with reproducible pseudo-random data.
   * Bug fix: the loops previously started at index 1 and only filled
   * A[0], so the stencil read uninitialized index-0 boundary planes and,
   * on later timesteps, uninitialized A[1] boundary values.  Both
   * buffers are now fully initialized, boundaries included. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,2);t1++) {
    lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
    ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-1,2)),ceild(4*t2-Nz,4));t3<=min(min(min(floord(4*t2+Ny,4),floord(Nt+Ny-4,4)),floord(2*t1+Ny+1,4)),floord(4*t1-4*t2+Nz+Ny-1,4));t3++) {
        for (t4=max(max(max(0,ceild(t1-511,512)),ceild(4*t2-Nz-1020,1024)),ceild(4*t3-Ny-1020,1024));t4<=min(min(min(min(floord(4*t2+Nx,1024),floord(4*t3+Nx,1024)),floord(Nt+Nx-4,1024)),floord(2*t1+Nx+1,1024)),floord(4*t1-4*t2+Nz+Nx-1,1024));t4++) {
          for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),4*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),4*t3+2),1024*t4+1022),4*t1-4*t2+Nz+1);t5++) {
            for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                lbv=max(1024*t4,t5+1);
                ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Deallocation is intentionally skipped: freeing the per-row buffers
   * here perturbed the timing measurements in the original benchmark. */
  return 0;
}
|
adapter.h | /**
* Implementation of a lock-free relaxed (a,b)-tree using LLX/SCX.
* Trevor Brown, 2018.
*/
#ifndef DS_ADAPTER_H
#define DS_ADAPTER_H
#include <iostream>
#include "errors.h"
#ifdef USE_TREE_STATS
# define TREE_STATS_BYTES_AT_DEPTH
# include "tree_stats.h"
#endif
#include "brown_ext_abtree_lf_impl.h"
#if !defined FAT_NODE_DEGREE
// #warning "FAT_NODE_DEGREE was not defined... using default: 16."
#define FAT_NODE_DEGREE 11
#endif
#define NODE_T abtree_ns::Node<FAT_NODE_DEGREE, K>
#define RECORD_MANAGER_T record_manager<Reclaim, Alloc, Pool, NODE_T>
#define DATA_STRUCTURE_T abtree_ns::abtree<FAT_NODE_DEGREE, K, std::less<K>, RECORD_MANAGER_T>
// Benchmark adapter exposing the lock-free relaxed (a,b)-tree behind the
// common setbench data-structure interface.  Values are stored internally
// as void *, so V must fit in a pointer (checked in the constructor).
template <typename K, typename V, class Reclaim = reclaimer_debra<K>, class Alloc = allocator_new<K>, class Pool = pool_none<K>>
class ds_adapter {
private:
    DATA_STRUCTURE_T * const ds;  // owned tree instance; deleted in ~ds_adapter
public:
    // KEY_ANY is the sentinel key required by the underlying tree; the
    // remaining "unused" parameters exist to match the harness signature.
    ds_adapter(const int NUM_THREADS,
               const K& KEY_ANY,
               const K& unused1,
               const V& unused2,
               Random64 * const unused3)
    : ds(new DATA_STRUCTURE_T(NUM_THREADS, KEY_ANY))
    {
        // Values round-trip through void *; reject V types that cannot.
        if (sizeof(V) > sizeof(void *)) {
            setbench_error("Value type V is too large to fit in void *. This data structure stores all values in fields of type void *, so this is a problem.");
        }
        if (NUM_THREADS > MAX_THREADS_POW2) {
            setbench_error("NUM_THREADS exceeds MAX_THREADS_POW2");
        }
    }
    ~ds_adapter() {
        delete ds;
    }
    // Sentinel returned by operations when no value is present.
    void * getNoValue() {
        return ds->NO_VALUE;
    }
    // Per-thread setup/teardown for the underlying record manager.
    void initThread(const int tid) {
        ds->initThread(tid);
    }
    void deinitThread(const int tid) {
        ds->deinitThread(tid);
    }
    bool contains(const int tid, const K& key) {
        return ds->contains(tid, key);
    }
    // Returns the previous value (or NO_VALUE) cast back to V.
    V insert(const int tid, const K& key, const V& val) {
        return (V) ds->insert(tid, key, val);
    }
    V insertIfAbsent(const int tid, const K& key, const V& val) {
        return (V) ds->insertIfAbsent(tid, key, val);
    }
    V erase(const int tid, const K& key) {
        return (V) ds->erase(tid, key).first;
    }
    V find(const int tid, const K& key) {
        return (V) ds->find(tid, key).first;
    }
    // Fills resultKeys/resultValues with entries in [lo, hi]; returns count.
    int rangeQuery(const int tid, const K& lo, const K& hi, K * const resultKeys, V * const resultValues) {
        return ds->rangeQuery(tid, lo, hi, resultKeys, (void ** const) resultValues);
    }
    void printSummary() {
        ds->debugGetRecMgr()->printStatus();
    }
    // No structural validation implemented for this data structure.
    bool validateStructure() {
        return true;
    }
    void printObjectSizes() {
        std::cout<<"size_node="<<(sizeof(NODE_T))<<std::endl;
    }
    // try to clean up: must only be called by a single thread as part of the test harness!
    void debugGCSingleThreaded() {
        ds->debugGetRecMgr()->debugGCSingleThreaded();
    }
#ifdef USE_TREE_STATS
    // Traversal shim used by the tree_stats tooling: describes how to walk
    // fat nodes and how to account their keys/children/sizes.
    class NodeHandler {
    public:
        typedef NODE_T * NodePtrType;
        K minKey;
        K maxKey;
        NodeHandler(const K& _minKey, const K& _maxKey) {
            minKey = _minKey;
            maxKey = _maxKey;
        }
        class ChildIterator {
        private:
            size_t ix;          // next child index to return
            NodePtrType node;   // node being iterated over
        public:
            ChildIterator(NodePtrType _node) { node = _node; ix = 0; }
            bool hasNext() { return ix < node->size; }
            NodePtrType next() { return node->ptrs[ix++]; }
        };
        static bool isLeaf(NodePtrType node) { return node->leaf; }
        static ChildIterator getChildIterator(NodePtrType node) { return ChildIterator(node); }
        static size_t getNumChildren(NodePtrType node) { return node->size; }
        // Only leaves carry keys; internal nodes report 0.
        static size_t getNumKeys(NodePtrType node) { return isLeaf(node) ? node->size : 0; }
        static size_t getSumOfKeys(NodePtrType node) {
            size_t sz = getNumKeys(node);
            size_t result = 0;
            for (size_t i=0;i<sz;++i) {
                result += (size_t) node->keys[i];
            }
            return result;
        }
        static size_t getSizeInBytes(NodePtrType node) { return sizeof(*node); }
    };
    TreeStats<NodeHandler> * createTreeStats(const K& _minKey, const K& _maxKey) {
        return new TreeStats<NodeHandler>(new NodeHandler(_minKey, _maxKey), ds->debug_getEntryPoint(), true);
    }
#endif
private:
    // Recursive traversal invoking 'callback' on every key/value pair.
    // At depth 4 each subtree is handed to an OpenMP task (the value 4 is
    // presumably tuned for task granularity — confirm before changing).
    template<typename... Arguments>
    void iterate_helper_fn(int depth, void (*callback)(K key, V value, Arguments... args)
            , NODE_T * node, Arguments... args) {
        if (node == NULL) return;
        if (node->leaf) {
            for (int i=0;i<node->getABDegree();++i) {
                K key = node->keys[i];
                V val = (V) node->ptrs[i];
                callback(key, val, args...);
            }
            return;
        }
        for (int i=0;i<node->getABDegree();++i) {
            if (depth == 4) {
#pragma omp task
                iterate_helper_fn(1+depth, callback, (NODE_T *) node->ptrs[i], args...);
            } else {
                iterate_helper_fn(1+depth, callback, (NODE_T *) node->ptrs[i], args...);
            }
        }
    }
public:
#define DS_ADAPTER_SUPPORTS_TERMINAL_ITERATE
    // Parallel whole-tree iteration: a single thread seeds the recursion,
    // and tasks spawned at depth 4 fan out across the OpenMP team.
    template<typename... Arguments>
    void iterate(void (*callback)(K key, V value, Arguments... args), Arguments... args) {
#pragma omp parallel
        {
#pragma omp single
            iterate_helper_fn(0, callback, ds->debug_getEntryPoint(), args...);
        }
    }
};
#undef RECORD_MANAGER_T
#undef DATA_STRUCTURE_T
#undef FAT_NODE_DEGREE
#endif
|
do_data_tstep.c | /*
** NAME
do_data_tstep -- run model for 1 data timestep between 2 input records
**
** SYNOPSIS
** #include "snobal.h"
**
** int
** do_data_tstep(void)
**
** DESCRIPTION
** This routine performs the model's calculations for 1 data timestep
** between 2 input-data records which are in 'input_rec1' and
** 'input_rec2'.
**
** If there's precipitation during the data timestep, the flag
**	'precip_now' must be TRUE.  Furthermore, the routine requires
** that the following precipitation variables have been initialized:
**
** m_pp
** percent_snow
** rho_snow
** T_pp
**
** This routine divides the data timestep into the appropriate number
** of normal run timesteps. The input values for each normal timestep
** are computed from the two input records by linear interpolation.
**
** If output is desired for any of the run timesteps (normal, medium,
** or small), the appropriate output flags must be set in the proper
** timestep's record (i.e., the array 'tstep_info'). If any output
** flag is set, the routine requires that the global variable 'out_func'
** point to appropriate output function.
**
** This routine may return in the middle of a data timestep if:
**
** a) the output function pointed to by 'out_func' is called, and
** b) the flag 'run_no_snow' is FALSE, and
** c) there is no snow remaining on the ground at the end of
** timestep
**
**	If this happens, the flag 'stop_no_snow' is set to TRUE.
**
** RETURN VALUE
**
** TRUE The model's calculations were completed.
**
**	FALSE	An error occurred, and a message explaining the error has
** been stored with the 'usrerr' routine.
**
** GLOBAL VARIABLES READ
** e_a
** I_lw
** in_rec
** layer_count
** m_pp_data
** m_rain_data
** m_snow_data
** more_pr_recs
** precip_data
** ro
** ro_data
** run_no_snow
** S_n
** T_a
** T_g
** tstep_info
** u
** z_snow_data
**
** GLOBAL VARIABLES MODIFIED
** precip_now
** stop_no_snow
*/
#include <omp.h>
//#include "ipw.h"
#include "envphys.h"
#include "_snobal.h"
/*
 * Run the model for one data timestep between the two input records
 * 'input_rec1' and 'input_rec2'.  Copies record 1 into the model's global
 * state, computes the interpolation deltas to record 2, splits any
 * precipitation into rain/snow, and then hands control to _divide_tstep()
 * to subdivide the data timestep into normal run timesteps.
 *
 * Returns TRUE on success, FALSE on error (a message is written to stderr).
 */
int
do_data_tstep(void)
{
	/*
	 * These were originally statics with initializers; under OpenMP they
	 * are threadprivate, so each thread must (re)point them at the global
	 * tables explicitly on every call.
	 */
	static PRECIP_REC *pp_info;	/* precip info for data timestep */
#pragma omp threadprivate(pp_info)
	pp_info = precip_info;
	static TSTEP_REC *data_tstep;	/* timestep info for data timestep */
#pragma omp threadprivate(data_tstep)
	data_tstep = tstep_info;

	int level;	/* loop index */

	/*
	 * Copy values from first input record into global variables.
	 */
	S_n = input_rec1.S_n;
	I_lw = input_rec1.I_lw;
	T_a = input_rec1.T_a;
	e_a = input_rec1.e_a;
	u = input_rec1.u;
	T_g = input_rec1.T_g;
	if (ro_data)
		ro = input_rec1.ro;

	/*
	 * Compute deltas for the climate input parameters over the data
	 * timestep (used later to linearly interpolate sub-timesteps).
	 */
	input_deltas[DATA_TSTEP].S_n = input_rec2.S_n - input_rec1.S_n;
	input_deltas[DATA_TSTEP].I_lw = input_rec2.I_lw - input_rec1.I_lw;
	input_deltas[DATA_TSTEP].T_a = input_rec2.T_a - input_rec1.T_a;
	input_deltas[DATA_TSTEP].e_a = input_rec2.e_a - input_rec1.e_a;
	input_deltas[DATA_TSTEP].u = input_rec2.u - input_rec1.u;
	input_deltas[DATA_TSTEP].T_g = input_rec2.T_g - input_rec1.T_g;
	if (ro_data)
		input_deltas[DATA_TSTEP].ro = input_rec2.ro - input_rec1.ro;

	/*
	 * If there is precipitation, then compute the amount of rain &
	 * snow in it.
	 */
	if (precip_now) {
		pp_info->m_pp = m_pp;
		pp_info->m_snow = percent_snow * m_pp;
		pp_info->m_rain = m_pp - pp_info->m_snow;
		if (pp_info->m_snow > 0.0) {
			if (rho_snow > 0.0)
				pp_info->z_snow = pp_info->m_snow / rho_snow;
			else {
				/*
				 * Bug fix: the message contained a bare '%_', an
				 * invalid printf conversion specification (undefined
				 * behavior).  The '%' is now escaped and a newline
				 * added.
				 */
				fprintf(stderr,
				    "rho_snow is <= 0.0 with %%_snow > 0.0\n");
				return FALSE;
			}
		}
		else
			pp_info->z_snow = 0.0;

		/*
		 * Mixed snow and rain: snow is at the melting point and
		 * fully saturated; rain carries the precip temperature.
		 */
		if ((pp_info->m_snow > 0.0) && (pp_info->m_rain > 0.0)) {
			T_snow = FREEZE;
			h2o_sat_snow = 1.0;
			T_rain = T_pp;
		}
		/*
		 * Snow only
		 */
		else if (pp_info->m_snow > 0.0) {
			if (T_pp < FREEZE) {	/* Cold snow */
				T_snow = T_pp;
				h2o_sat_snow = 0.0;
			}
			else {			/* Warm snow */
				T_snow = FREEZE;
				h2o_sat_snow = 1.0;
			}
		}
		/*
		 * Rain only
		 */
		else if (pp_info->m_rain > 0.0) {
			T_rain = T_pp;
		}
	}

	/*
	 * Clear the 'computed' flag at the other timestep levels.
	 */
	for (level = NORMAL_TSTEP; level <= SMALL_TSTEP; level++)
		computed[level] = FALSE;

	/*
	 * Divide the data timestep into normal run timesteps.
	 */
	return _divide_tstep(data_tstep);
}
|
ncpdq.c | /* $Header$ */
/* ncpdq -- netCDF pack, re-dimension, query */
/* Purpose: Pack, re-dimension, query single netCDF file and output to a single file */
/* Copyright (C) 1995--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License.
You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits
libraries and to distribute the resulting executables under the terms
of the BSD, but in addition obeying the extra stipulations of the
HDF, netCDF, OPeNDAP, and UDUnits licenses.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the 3-Clause BSD License for more details.
The original author of this software, Charlie Zender, seeks to improve
it with your suggestions, contributions, bug-reports, and patches.
Please contact the NCO project at http://nco.sf.net or write to
Charlie Zender
Department of Earth System Science
University of California, Irvine
Irvine, CA 92697-3100 */
/* Usage:
ncpdq -O -D 3 -a lat,lev,lon -v three_dmn_var ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc
ncpdq -O -D 3 -a lon,lev,lat -v three_dmn_var ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc
ncpdq -O -D 3 -a lon,time -x -v three_double_dmn ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc
ncpdq -O -D 3 -P all_new ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -P all_xst ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -P xst_new ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -M dbl_flt ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -M flt_dbl ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -P upk ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -a lon,lat -g g21,g22 ~/nco/data/in_grp_3.nc ~/foo.nc
ncpdq -O -D 3 -g g1 -v v1 --union -G dude -p ~/nco/data in_grp.nc ~/foo.nc */
#ifdef HAVE_CONFIG_H
# include <config.h> /* Autotools tokens */
#endif /* !HAVE_CONFIG_H */
/* Standard C headers */
#include <math.h> /* sin cos cos sin 3.14159 */
#include <stdio.h> /* stderr, FILE, NULL, etc. */
#include <stdlib.h> /* atof, atoi, malloc, getopt */
#include <string.h> /* strcmp() */
#include <time.h> /* machine time */
#ifndef _MSC_VER
# include <unistd.h> /* POSIX stuff */
#endif
#ifndef HAVE_GETOPT_LONG
# include "nco_getopt.h"
#else /* HAVE_GETOPT_LONG */
# ifdef HAVE_GETOPT_H
# include <getopt.h>
# endif /* !HAVE_GETOPT_H */
#endif /* HAVE_GETOPT_LONG */
#ifdef I18N
# include <langinfo.h> /* nl_langinfo() */
# include <libintl.h> /* Internationalization i18n */
# include <locale.h> /* Locale setlocale() */
# define _(sng) gettext (sng)
# define gettext_noop(sng) (sng)
# define N_(sng) gettext_noop(sng)
#endif /* I18N */
/* Supply stub gettext() function in case i18n failed */
#ifndef _LIBINTL_H
# define gettext(foo) foo
#endif /* _LIBINTL_H */
/* 3rd party vendors */
#include <netcdf.h> /* netCDF definitions and C library */
#ifdef ENABLE_MPI
# include <mpi.h> /* MPI definitions */
# include <netcdf_par.h> /* Parallel netCDF definitions */
# include "nco_mpi.h" /* MPI utilities */
#endif /* !ENABLE_MPI */
/* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */
#define MAIN_PROGRAM_FILE
#include "libnco.h" /* netCDF Operator (NCO) library */
int
main(int argc,char **argv)
{
aed_sct *aed_lst_add_fst=NULL_CEWI;
aed_sct *aed_lst_scl_fct=NULL_CEWI;
char **dmn_rdr_lst_in=NULL_CEWI; /* Option a */
char **fl_lst_abb=NULL; /* Option n */
char **fl_lst_in=NULL_CEWI;
char **gaa_arg=NULL; /* [sng] Global attribute arguments */
char **var_lst_in=NULL_CEWI;
char **grp_lst_in=NULL_CEWI;
char *aux_arg[NC_MAX_DIMS];
char *cmd_ln;
char *cnk_arg[NC_MAX_DIMS];
char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */
char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */
char *fl_in=NULL;
char *fl_out=NULL; /* Option o */
char *fl_out_tmp=NULL_CEWI;
char *fl_pth=NULL; /* Option p */
char *fl_pth_lcl=NULL; /* Option l */
char *lmt_arg[NC_MAX_DIMS];
char *nco_pck_plc_sng=NULL_CEWI; /* [sng] Packing policy Option P */
char *nco_pck_map_sng=NULL_CEWI; /* [sng] Packing map Option M */
char *opt_crr=NULL; /* [sng] String representation of current long-option name */
char *optarg_lcl; /* [sng] Local copy of system optarg */
char *ppc_arg[NC_MAX_VARS]; /* [sng] PPC arguments */
char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */
char add_fst_sng[]="add_offset"; /* [sng] Unidata standard string for add offset */
char scl_fct_sng[]="scale_factor"; /* [sng] Unidata standard string for scale factor */
char trv_pth[]="/"; /* [sng] Root path of traversal tree */
const char * const CVS_Id="$Id$";
const char * const CVS_Revision="$Revision$";
const char * const opt_sht_lst="34567Aa:CcD:d:Fg:G:hL:l:M:Oo:P:p:Rrt:v:UxZ-:";
cnk_sct cnk; /* [sct] Chunking structure */
cnv_sct *cnv; /* [sct] Convention structure */
#if defined(__cplusplus) || defined(PGI_CC)
ddra_info_sct ddra_info;
ddra_info.flg_ddra=False;
#else /* !__cplusplus */
ddra_info_sct ddra_info={.flg_ddra=False};
#endif /* !__cplusplus */
dmn_sct **dmn_rdr_trv=NULL; /* [sct] Dimension structures to be re-ordered (from global table) */
extern char *optarg;
extern int optind;
/* Using naked stdin/stdout/stderr in parallel region generates warning
Copy appropriate filehandle to variable scoped shared in parallel clause */
FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */
FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */
gpe_sct *gpe=NULL; /* [sng] Group Path Editing (GPE) structure */
int *in_id_arr;
int abb_arg_nbr=0;
int aux_nbr=0; /* [nbr] Number of auxiliary coordinate hyperslabs specified */
int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */
int cnk_nbr=0; /* [nbr] Number of chunk sizes */
int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int dmn_rdr_nbr=0; /* [nbr] Number of dimension to re-order */
int dmn_rdr_nbr_trv=0; /* [nbr] Number of dimension to re-order (from global table) */
int dmn_rdr_nbr_in=0; /* [nbr] Original number of dimension to re-order */
int fl_idx=int_CEWI;
int fl_nbr=0;
int fl_in_fmt; /* [enm] Input file format */
int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */
int fll_md_old; /* [enm] Old fill mode */
int gaa_nbr=0; /* [nbr] Number of global attributes to add */
int idx=int_CEWI;
int idx_rdr=int_CEWI;
int in_id;
int lmt_nbr=0; /* Option d. NB: lmt_nbr gets incremented */
int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */
int md_open; /* [enm] Mode flag for nc_open() call */
int nbr_dmn_fl;
int nbr_var_fix; /* nbr_var_fix gets incremented */
int nbr_var_fl;
int nbr_var_prc; /* nbr_var_prc gets incremented */
int nco_pck_map=nco_pck_map_flt_sht; /* [enm] Packing map */
int nco_pck_plc=nco_pck_plc_nil; /* [enm] Packing policy */
int opt;
int out_id;
int ppc_nbr=0; /* [nbr] Number of PPC arguments */
int rcd=NC_NOERR; /* [rcd] Return code */
int thr_idx; /* [idx] Index of current thread */
int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */
int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */
int var_lst_in_nbr=0;
int grp_lst_in_nbr=0; /* [nbr] Number of groups explicitly specified by user */
md5_sct *md5=NULL; /* [sct] MD5 configuration */
nco_bool *dmn_rvr_rdr=NULL; /* [flg] Reverse dimensions */
nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */
nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */
nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */
nco_bool EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */
nco_bool EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */
nco_bool FL_RTR_RMT_LCN;
nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */
nco_bool FORCE_APPEND=False; /* Option A */
nco_bool FORCE_OVERWRITE=False; /* Option O */
nco_bool FORTRAN_IDX_CNV=False; /* Option F */
nco_bool GRP_VAR_UNN=False; /* [flg] Select union of specified groups and variables */
nco_bool HISTORY_APPEND=True; /* Option h */
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool IS_REORDER=False; /* Re-order mode */
nco_bool MSA_USR_RDR=False; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order*/
nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */
nco_bool flg_mmr_cln=True; /* [flg] Clean memory prior to exit */
nco_bool flg_dmn_prc_usr_spc=False; /* [flg] Processed dimensions specified on command line */
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */
size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */
size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */
size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */
size_t hdr_pad=0UL; /* [B] Pad at end of header section */
var_sct **var;
var_sct **var_fix;
var_sct **var_fix_out;
var_sct **var_out;
var_sct **var_prc;
var_sct **var_prc_out;
trv_tbl_sct *trv_tbl=NULL; /* [lst] Traversal table */
nco_dmn_dne_t *flg_dne=NULL; /* [lst] Flag to check if input dimension -d "does not exist" */
#ifdef ENABLE_MPI
/* Declare all MPI-specific variables here */
MPI_Comm mpi_cmm=MPI_COMM_WORLD; /* [prc] Communicator */
int prc_rnk; /* [idx] Process rank */
int prc_nbr=0; /* [nbr] Number of MPI processes */
#endif /* !ENABLE_MPI */
static struct option opt_lng[]={ /* Structure ordered by short option key if possible; entries with flag/val zero are long-only options, dispatched by name in the opt==0 branch when getopt_long() returns 0 */
/* Long options with no argument, no short option counterpart */
{"cll_msr",no_argument,0,0}, /* [flg] Extract cell_measures variables */
{"cell_measures",no_argument,0,0}, /* [flg] Extract cell_measures variables */
{"no_cll_msr",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */
{"no_cell_measures",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */
{"frm_trm",no_argument,0,0}, /* [flg] Extract formula_terms variables */
{"formula_terms",no_argument,0,0}, /* [flg] Extract formula_terms variables */
{"no_frm_trm",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */
{"no_formula_terms",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */
{"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"hdf4",no_argument,0,0}, /* [flg] Treat file as HDF4 */
{"hdf_upk",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */
{"hdf_unpack",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */
{"help",no_argument,0,0},
{"hlp",no_argument,0,0},
{"hpss_try",no_argument,0,0}, /* [flg] Search HPSS for unfound files */
{"mrd",no_argument,0,0}, /* [enm] Multiple Record Dimension convention */
{"multiple_record_dimension",no_argument,0,0}, /* [enm] Multiple Record Dimension convention */
{"msa_usr_rdr",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
{"msa_user_order",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
{"ram_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */
{"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */
{"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */
{"diskless_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */
{"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */
{"intersection",no_argument,0,0}, /* [flg] Select intersection of specified groups and variables */
{"nsx",no_argument,0,0}, /* [flg] Select intersection of specified groups and variables */
{"union",no_argument,0,0}, /* [flg] Select union of specified groups and variables */
{"unn",no_argument,0,0}, /* [flg] Select union of specified groups and variables */
{"version",no_argument,0,0},
{"vrs",no_argument,0,0},
/* Long options with argument, no short option counterpart */
{"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */
{"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */
{"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */
{"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */
{"cnk_csh",required_argument,0,0}, /* [B] Chunk cache size in bytes */
{"chunk_cache",required_argument,0,0}, /* [B] Chunk cache size in bytes */
{"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */
{"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */
{"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */
{"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */
{"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"fl_fmt",required_argument,0,0},
{"file_format",required_argument,0,0},
{"gaa",required_argument,0,0}, /* [sng] Global attribute add */
{"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */
{"hdr_pad",required_argument,0,0},
{"header_pad",required_argument,0,0},
{"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"ppc",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */
{"precision_preserving_compression",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */
{"quantize",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */
{"upk",required_argument,0,0}, /* [enm] Unpacking convention to utilize */
/* Long options with short counterparts */
{"3",no_argument,0,'3'},
{"4",no_argument,0,'4'},
{"netcdf4",no_argument,0,'4'},
{"5",no_argument,0,'5'},
{"64bit_data",no_argument,0,'5'},
{"cdf5",no_argument,0,'5'},
{"pnetcdf",no_argument,0,'5'},
{"64bit_offset",no_argument,0,'6'},
{"7",no_argument,0,'7'},
{"append",no_argument,0,'A'},
{"arrange",required_argument,0,'a'},
{"permute",required_argument,0,'a'},
{"reorder",required_argument,0,'a'},
{"rdr",required_argument,0,'a'},
{"xtr_ass_var",no_argument,0,'c'},
{"xcl_ass_var",no_argument,0,'C'},
{"no_coords",no_argument,0,'C'},
{"no_crd",no_argument,0,'C'},
{"coords",no_argument,0,'c'},
{"crd",no_argument,0,'c'},
{"debug",required_argument,0,'D'},
{"nco_dbg_lvl",required_argument,0,'D'},
{"dimension",required_argument,0,'d'},
{"dmn",required_argument,0,'d'},
{"fortran",no_argument,0,'F'},
{"ftn",no_argument,0,'F'},
{"gpe",required_argument,0,'G'}, /* [sng] Group Path Edit (GPE) */
{"grp",required_argument,0,'g'},
{"group",required_argument,0,'g'},
{"history",no_argument,0,'h'},
{"hst",no_argument,0,'h'},
{"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */
{"deflate",required_argument,0,'L'}, /* [enm] Deflate level */
{"local",required_argument,0,'l'},
{"lcl",required_argument,0,'l'},
{"pack_map",required_argument,0,'M'},
{"pck_map",required_argument,0,'M'},
{"map",required_argument,0,'M'},
{"overwrite",no_argument,0,'O'},
{"ovr",no_argument,0,'O'},
{"output",required_argument,0,'o'},
{"fl_out",required_argument,0,'o'},
{"pack_policy",required_argument,0,'P'},
{"pck_plc",required_argument,0,'P'},
{"path",required_argument,0,'p'},
{"retain",no_argument,0,'R'},
{"rtn",no_argument,0,'R'},
{"revision",no_argument,0,'r'},
{"thr_nbr",required_argument,0,'t'},
{"threads",required_argument,0,'t'},
{"omp_num_threads",required_argument,0,'t'},
{"unpack",no_argument,0,'U'},
{"variable",required_argument,0,'v'},
{"auxiliary",required_argument,0,'X'},
{"exclude",no_argument,0,'x'},
{"xcl",no_argument,0,'x'},
{0,0,0,0} /* All-zero sentinel entry terminates the table, as getopt_long() requires */
}; /* end opt_lng */
int opt_idx=0; /* Index of current long option into opt_lng array */
/* Initialize traversal table */
trv_tbl_init(&trv_tbl);
/* Start timer and save command line */
ddra_info.tmr_flg=nco_tmr_srt;
rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info);
ddra_info.tmr_flg=nco_tmr_mtd;
cmd_ln=nco_cmd_ln_sng(argc,argv);
/* Get program name and set program enum (e.g., nco_prg_id=ncra) */
nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id);
#ifdef ENABLE_MPI
/* MPI Initialization */
if(False) (void)fprintf(stdout,gettext("%s: WARNING Compiled with MPI\n"),nco_prg_nm);
MPI_Init(&argc,&argv);
MPI_Comm_size(mpi_cmm,&prc_nbr);
MPI_Comm_rank(mpi_cmm,&prc_rnk);
#endif /* !ENABLE_MPI */
/* Parse command line arguments */
while(1){
/* getopt_long_only() allows one dash to prefix long options */
opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx);
/* NB: access to opt_crr is only valid when long_opt is detected */
if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */
opt_crr=(char *)strdup(opt_lng[opt_idx].name);
/* Process long options without short option counterparts */
if(opt == 0){
if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){
bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif bfr_sz_hnt */
if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){
cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_byt */
if(!strcmp(opt_crr,"cnk_csh") || !strcmp(opt_crr,"chunk_cache")){
cnk_csh_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_csh_byt */
if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){
cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_min */
if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){
/* Copy limit argument for later processing */
cnk_arg[cnk_nbr]=(char *)strdup(optarg);
cnk_nbr++;
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){
cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){
/* Chunking map */
cnk_map_sng=(char *)strdup(optarg);
cnk_map=nco_cnk_map_get(cnk_map_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){
/* Chunking policy */
cnk_plc_sng=(char *)strdup(optarg);
cnk_plc=nco_cnk_plc_get(cnk_plc_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"cll_msr") || !strcmp(opt_crr,"cell_measures")) EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */
if(!strcmp(opt_crr,"no_cll_msr") || !strcmp(opt_crr,"no_cell_measures")) EXTRACT_CLL_MSR=False; /* [flg] Do not extract cell_measures variables */
if(!strcmp(opt_crr,"frm_trm") || !strcmp(opt_crr,"formula_terms")) EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */
if(!strcmp(opt_crr,"no_frm_trm") || !strcmp(opt_crr,"no_formula_terms")) EXTRACT_FRM_TRM=False; /* [flg] Do not extract formula_terms variables */
if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Allow dirty memory on exit */
if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt);
if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){
gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *));
gaa_arg[gaa_nbr++]=(char *)strdup(optarg);
} /* endif gaa */
if(!strcmp(opt_crr,"hdf4")) nco_fmt_xtn=nco_fmt_xtn_hdf4; /* [enm] Treat file as HDF4 */
if(!strcmp(opt_crr,"hdf_upk") || !strcmp(opt_crr,"hdf_unpack")) nco_upk_cnv=nco_upk_HDF_MOD10; /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */
if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){
hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif "hdr_pad" */
if(!strcmp(opt_crr,"help") || !strcmp(opt_crr,"hlp")){
(void)nco_usg_prn();
nco_exit(EXIT_SUCCESS);
} /* endif "help" */
if(!strcmp(opt_crr,"hpss_try")) HPSS_TRY=True; /* [flg] Search HPSS for unfound files */
if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){
log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
nc_set_log_level(log_lvl);
} /* !log_lvl */
if(!strcmp(opt_crr,"mrd") || !strcmp(opt_crr,"multiple_record_dimension")) nco_mrd_cnv=nco_mrd_allow; /* [enm] Multiple Record Dimension convention */
if(!strcmp(opt_crr,"msa_usr_rdr") || !strcmp(opt_crr,"msa_user_order")) MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
if(!strcmp(opt_crr,"ppc") || !strcmp(opt_crr,"precision_preserving_compression") || !strcmp(opt_crr,"quantize")){
ppc_arg[ppc_nbr]=(char *)strdup(optarg);
ppc_nbr++;
} /* endif "ppc" */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create file in RAM */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */
if(!strcmp(opt_crr,"unn") || !strcmp(opt_crr,"union")) GRP_VAR_UNN=True;
if(!strcmp(opt_crr,"nsx") || !strcmp(opt_crr,"intersection")) GRP_VAR_UNN=False;
if(!strcmp(opt_crr,"upk")){ /* [enm] Unpacking convention to utilize */
nco_upk_cnv=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
} /* endif "upk" */
if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){
log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
nc_set_log_level(log_lvl);
} /* !log_lvl (NOTE(review): this block duplicates the log_lvl/log_level handling earlier in this branch; confirm the repetition is intentional) */
if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
nco_exit(EXIT_SUCCESS);
} /* endif "vrs" */
if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True;
if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False;
} /* opt != 0 */
/* Process short options */
switch(opt){
case 0: /* Long options have already been processed, return */
break;
case '3': /* Request netCDF3 output storage format */
fl_out_fmt=NC_FORMAT_CLASSIC;
break;
case '4': /* Request netCDF4 output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4;
break;
case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */
fl_out_fmt=NC_FORMAT_CDF5;
break;
case '6': /* Request netCDF3 64-bit offset output storage format */
fl_out_fmt=NC_FORMAT_64BIT_OFFSET;
break;
case '7': /* Request netCDF4-classic output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC;
break;
case 'A': /* Toggle FORCE_APPEND */
FORCE_APPEND=!FORCE_APPEND;
break;
case 'a': /* Re-order dimensions */
flg_dmn_prc_usr_spc=True;
dmn_rdr_lst_in=nco_lst_prs_2D(optarg,",",&dmn_rdr_nbr_in);
dmn_rdr_nbr=dmn_rdr_nbr_in;
break;
case 'C': /* Extract all coordinates associated with extracted variables? */
EXTRACT_ASSOCIATED_COORDINATES=False;
break;
case 'c':
EXTRACT_ALL_COORDINATES=True;
break;
case 'D': /* Debugging level. Default is 0. */
nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
break;
case 'd': /* Copy limit argument for later processing */
lmt_arg[lmt_nbr]=(char *)strdup(optarg);
lmt_nbr++;
break;
case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */
FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV;
break;
case 'G': /* Apply Group Path Editing (GPE) to output group */
/* NB: GNU getopt() optional argument syntax is ugly (requires "=" sign) so avoid it
http://stackoverflow.com/questions/1052746/getopt-does-not-parse-optional-arguments-to-parameters */
gpe=nco_gpe_prs_arg(optarg);
fl_out_fmt=NC_FORMAT_NETCDF4;
break;
case 'g': /* Copy group argument for later processing */
/* Replace commas with hashes when within braces (convert back later) */
optarg_lcl=(char *)strdup(optarg);
(void)nco_rx_comma2hash(optarg_lcl);
grp_lst_in=nco_lst_prs_2D(optarg_lcl,",",&grp_lst_in_nbr);
optarg_lcl=(char *)nco_free(optarg_lcl);
break;
case 'h': /* Toggle appending to history global attribute */
HISTORY_APPEND=!HISTORY_APPEND;
break;
case 'L': /* [enm] Deflate level. Default is 0. */
dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'l': /* Local path prefix for files retrieved from remote file system */
fl_pth_lcl=(char *)strdup(optarg);
break;
case 'M': /* Packing map */
nco_pck_map_sng=(char *)strdup(optarg);
nco_pck_map=nco_pck_map_get(nco_pck_map_sng);
break;
case 'O': /* Toggle FORCE_OVERWRITE */
FORCE_OVERWRITE=!FORCE_OVERWRITE;
break;
case 'o': /* Name of output file */
fl_out=(char *)strdup(optarg);
break;
case 'P': /* Packing policy */
nco_pck_plc_sng=(char *)strdup(optarg);
break;
case 'p': /* Common file path */
fl_pth=(char *)strdup(optarg);
break;
case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. */
RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC;
break;
case 'r': /* Print CVS program information and copyright notice */
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
(void)nco_lbr_vrs_prn();
(void)nco_cpy_prn();
(void)nco_cnf_prn();
nco_exit(EXIT_SUCCESS);
break;
case 't': /* Thread number */
thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'U': /* Unpacking switch */
nco_pck_plc_sng=(char *)strdup("upk");
break;
case 'v': /* Variables to extract/exclude */
/* Replace commas with hashes when within braces (convert back later) */
optarg_lcl=(char *)strdup(optarg);
(void)nco_rx_comma2hash(optarg_lcl);
var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr);
optarg_lcl=(char *)nco_free(optarg_lcl);
xtr_nbr=var_lst_in_nbr;
break;
case 'X': /* Copy auxiliary coordinate argument for later processing */
aux_arg[aux_nbr]=(char *)strdup(optarg);
aux_nbr++;
MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
break;
case 'x': /* Exclude rather than extract variables specified with -v */
EXCLUDE_INPUT_LIST=True;
break;
case '?': /* Question mark means unrecognized option, print proper usage then EXIT_FAILURE */
(void)fprintf(stdout,"%s: ERROR in command-line syntax/options. Missing or unrecognized option. Please reformulate command accordingly.\n",nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
break;
case '-': /* Long options are not allowed */
(void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
break;
default: /* Print proper usage */
(void)fprintf(stdout,"%s ERROR in command-line syntax/options. Please reformulate command accordingly.\n",nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
break;
} /* end switch */
if(opt_crr) opt_crr=(char *)nco_free(opt_crr);
} /* end while loop */
/* Set/report global chunk cache */
rcd+=nco_cnk_csh_ini(cnk_csh_byt);
/* Set re-order flag */
if(dmn_rdr_nbr > 0) IS_REORDER=True;
/* No re-order dimensions specified implies packing request */
if(dmn_rdr_nbr == 0){
if(nco_pck_plc == nco_pck_plc_nil) nco_pck_plc=nco_pck_plc_get(nco_pck_plc_sng);
if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: DEBUG Packing map is %s and packing policy is %s\n",nco_prg_nm_get(),nco_pck_map_sng_get(nco_pck_map),nco_pck_plc_sng_get(nco_pck_plc));
} /* dmn_rdr_nbr != 0 */
/* From this point forward, assume ncpdq operator packs or re-orders, not both */
if(dmn_rdr_nbr > 0 && nco_pck_plc != nco_pck_plc_nil){
(void)fprintf(fp_stdout,"%s: ERROR %s does not support simultaneous dimension re-ordering (-a switch) and packing (-P switch).\nHINT: Invoke %s twice, once to re-order (with -a), and once to pack (with -P).\n",nco_prg_nm,nco_prg_nm,nco_prg_nm);
nco_exit(EXIT_FAILURE);
} /* endif */
/* Process positional arguments and fill-in filenames */
fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE);
/* Initialize thread information */
thr_nbr=nco_openmp_ini(thr_nbr);
in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int));
/* Parse filename */
fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
/* Make sure file is on local system and is readable or die trying */
fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
/* Open file using appropriate buffer size hints and verbosity */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id);
/* Get file format */
(void)nco_inq_format(in_id,&fl_in_fmt);
/* Construct GTT, Group Traversal Table (groups,variables,dimensions, limits) */
(void)nco_bld_trv_tbl(in_id,trv_pth,lmt_nbr,lmt_arg,aux_nbr,aux_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,grp_lst_in,grp_lst_in_nbr,var_lst_in,xtr_nbr,EXTRACT_ALL_COORDINATES,GRP_VAR_UNN,False,EXCLUDE_INPUT_LIST,EXTRACT_ASSOCIATED_COORDINATES,EXTRACT_CLL_MSR,EXTRACT_FRM_TRM,nco_pck_plc_nil,&flg_dne,trv_tbl);
/* Were all user-specified dimensions found? */
(void)nco_chk_dmn(lmt_nbr,flg_dne);
/* Create reversed dimension list */
if(dmn_rdr_nbr_in > 0){
dmn_rvr_rdr=(nco_bool *)nco_malloc(dmn_rdr_nbr_in*sizeof(nco_bool));
/* Is dimension to be reversed? i.e., does string begin with minus-sign '-'? */
for(idx_rdr=0;idx_rdr<dmn_rdr_nbr_in;idx_rdr++){
if(dmn_rdr_lst_in[idx_rdr][0] == '-'){
dmn_rvr_rdr[idx_rdr]=True;
/* Strip-out '-': Copy string to new memory one past negative sign to avoid losing byte */
optarg_lcl=dmn_rdr_lst_in[idx_rdr];
dmn_rdr_lst_in[idx_rdr]=(char *)strdup(optarg_lcl+1L);
optarg_lcl=(char *)nco_free(optarg_lcl);
}else{
dmn_rvr_rdr[idx_rdr]=False;
} /* !'-' */
} /* !idx_rdr */
} /* !dmn_rdr_nbr_in */
/* Get number of variables, dimensions, and global attributes in file, file format */
(void)trv_tbl_inq((int *)NULL,(int *)NULL,(int *)NULL,&nbr_dmn_fl,(int *)NULL,(int *)NULL,(int *)NULL,(int *)NULL,&nbr_var_fl,trv_tbl);
/* Create list of dimensions to average(ncwa)/re-order(ncpdq) */
if(IS_REORDER) (void)nco_dmn_avg_mk(in_id,dmn_rdr_lst_in,dmn_rdr_nbr_in,flg_dmn_prc_usr_spc,False,trv_tbl,&dmn_rdr_trv,&dmn_rdr_nbr_trv);
/* Fill-in variable structure list for all extracted variables */
var=nco_fll_var_trv(in_id,&xtr_nbr,trv_tbl);
/* Duplicate to output array */
var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
for(idx=0;idx<xtr_nbr;idx++){
var_out[idx]=nco_var_dpl(var[idx]);
(void)nco_xrf_var(var[idx],var_out[idx]);
(void)nco_xrf_dmn(var_out[idx]);
} /* end loop over variables */
/* Refresh var_out with dim_out data */
(void)nco_var_dmn_refresh(var_out,xtr_nbr);
/* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */
cnv=nco_cnv_ini(in_id);
/* Divide variable lists into lists of fixed variables and variables to be processed */
(void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_map,nco_pck_plc,dmn_rdr_trv,dmn_rdr_nbr_trv,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc,trv_tbl);
/* Store processed and fixed variables info into GTT */
(void)nco_var_prc_fix_trv(nbr_var_prc,var_prc,nbr_var_fix,var_fix,trv_tbl);
/* We now have final list of variables to extract. Phew. */
/* Make output and input files consanguinous */
if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt;
/* Initialize, decode, and set PPC information */
if(ppc_nbr > 0) nco_ppc_ini(in_id,&dfl_lvl,fl_out_fmt,ppc_arg,ppc_nbr,trv_tbl);
/* Verify output file format supports requested actions */
(void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl);
/* Open output file */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,WRT_TMP_FL,&out_id);
/* Initialize chunking from user-specified inputs */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) rcd+=nco_cnk_ini(in_id,fl_out,cnk_arg,cnk_nbr,cnk_map,cnk_plc,cnk_csh_byt,cnk_min_byt,cnk_sz_byt,cnk_sz_scl,&cnk);
if(IS_REORDER){
dmn_sct **dmn_rdr=NULL; /* [sct] Dimension structures to be re-ordered */
/* "dmn_rdr" is only used for input to function nco_var_dmn_rdr_mtd(), that compares dimensions by short name;
this is because the input list of -a are dimension short names; group support is obtained combining with -g option;
on input it contains a list of dimension short names (in "dmn_rdr"), that together with input array "dmn_rvr_rdr"
of flags that determine if dimension at index dmn_rvr_rdr[index] is to be reversed; use cases:
in_grp_8.nc contains the dimensions /g1/lat, /g1/lon, /g2/lat, /g2/lon
ncpdq -O -v lat,lon -a -lat,-lon -g g1,g2 ~/nco/data/in_grp_8.nc out1.nc
"dmn_rdr" contains names ["lat"], ["lon"], striped of '-' (minus) sign and dmn_rvr_rdr contains [True],[True ]
output is reversed /g1/lat, /g1/lon, /g2/lat, /g2/lon
ncpdq -O -v lat,lon -a lat,-lon -g g1,g2 ~/nco/data/in_grp_8.nc out1.nc
"dmn_rdr" contains names ["lat"], ["lon"], and dmn_rvr_rdr contains [False],[True ]
output is reversed /g1/lon, /g2/lon */
/* Form list of re-ordering dimensions from extracted input dimensions */
dmn_rdr=(dmn_sct **)nco_malloc(dmn_rdr_nbr*sizeof(dmn_sct *));
/* Initialize re-ordering dimensions; initialize only short name */
for(idx_rdr=0;idx_rdr<dmn_rdr_nbr_in;idx_rdr++){
dmn_rdr[idx_rdr]=(dmn_sct *)nco_malloc(sizeof(dmn_sct));
dmn_rdr[idx_rdr]->nm=(char *)strdup(dmn_rdr_lst_in[idx_rdr]);
dmn_rdr[idx_rdr]->nm_fll=NULL;
dmn_rdr[idx_rdr]->id=-1;
}
/* Determine and set new dimensionality in metadata of each re-ordered variable */
(void)nco_var_dmn_rdr_mtd_trv(trv_tbl,nbr_var_prc,var_prc,var_prc_out,nbr_var_fix,var_fix,dmn_rdr,dmn_rdr_nbr,dmn_rvr_rdr);
for(idx_rdr=0; idx_rdr<dmn_rdr_nbr_in; idx_rdr++){
dmn_rdr[idx_rdr]->nm=(char *)nco_free(dmn_rdr[idx_rdr]->nm);
dmn_rdr[idx_rdr]=(dmn_sct *)nco_free(dmn_rdr[idx_rdr]);
}
dmn_rdr=(dmn_sct **)nco_free(dmn_rdr);
} /* IS_REORDER */
/* Alter metadata for variables that will be packed */
if(nco_pck_plc != nco_pck_plc_nil){
if(nco_pck_plc != nco_pck_plc_upk){
/* Allocate attribute list container for maximum number of entries */
aed_lst_add_fst=(aed_sct *)nco_malloc(nbr_var_prc*sizeof(aed_sct));
aed_lst_scl_fct=(aed_sct *)nco_malloc(nbr_var_prc*sizeof(aed_sct));
} /* endif packing */
for(idx=0;idx<nbr_var_prc;idx++){
nco_pck_mtd(var_prc[idx],var_prc_out[idx],nco_pck_map,nco_pck_plc);
if(nco_pck_plc != nco_pck_plc_upk){
/* Use same copy of attribute name for all edits */
aed_lst_add_fst[idx].att_nm=add_fst_sng;
aed_lst_scl_fct[idx].att_nm=scl_fct_sng;
} /* endif packing */
} /* end loop over var_prc */
/* Transfer variable type to table. NB: Use processed variables set with new type. MUST be done before definition. */
(void)nco_var_typ_trv(nbr_var_prc,var_prc_out,trv_tbl);
} /* nco_pck_plc == nco_pck_plc_nil */
/* Define dimensions, extracted groups, variables, and attributes in output file. NB: record name is NULL */
(void)nco_xtr_dfn(in_id,out_id,&cnk,dfl_lvl,gpe,md5,!FORCE_APPEND,True,False,nco_pck_plc,(char *)NULL,trv_tbl);
/* Catenate time-stamped command line to "history" global attribute */
if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln);
if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id);
if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr);
if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id);
if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
/* Take output file out of define mode */
if(hdr_pad == 0UL){
(void)nco_enddef(out_id);
}else{
(void)nco__enddef(out_id,hdr_pad);
if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad);
} /* hdr_pad */
/* Assign zero to start and unity to stride vectors in output variables */
(void)nco_var_srd_srt_set(var_out,xtr_nbr);
/* Copy variable data for non-processed variables */
(void)nco_cpy_fix_var_trv(in_id,out_id,gpe,trv_tbl);
/* Close first input netCDF file */
nco_close(in_id);
/* Loop over input files (not currently used, fl_nbr == 1) */
for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){
/* Parse filename */
if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"%s: INFO Input file %d is %s",nco_prg_nm_get(),fl_idx,fl_in);
/* Make sure file is on local system and is readable or die trying */
if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
if(nco_dbg_lvl >= nco_dbg_fl && FL_RTR_RMT_LCN) (void)fprintf(stderr,", local file is %s",fl_in);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n");
/* Open file once per thread to improve caching */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx);
/* Timestamp end of metadata setup and disk layout */
rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info);
ddra_info.tmr_flg=nco_tmr_rgl;
#ifdef _OPENMP
#pragma omp parallel for private(idx,in_id) shared(aed_lst_add_fst,aed_lst_scl_fct,nco_dbg_lvl,dmn_rdr_nbr,gpe,in_id_arr,nbr_var_prc,nco_pck_map,nco_pck_plc,out_id,nco_prg_nm,rcd,var_prc,var_prc_out,nbr_dmn_fl,trv_tbl,IS_REORDER,fl_out_fmt)
#endif /* !_OPENMP */
/* Process all variables in current file */
for(idx=0;idx<nbr_var_prc;idx++){
char *grp_out_fll=NULL; /* [sng] Group name */
int grp_out_id; /* [ID] Group ID (output) */
int var_out_id; /* [ID] Variable ID (output) */
trv_sct *var_trv; /* [sct] Variable GTT object */
in_id=in_id_arr[omp_get_thread_num()];
var_prc[idx]->nc_id=in_id;
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Obtain variable GTT object using full variable name */
var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl);
/* Retrieve variable from disk into memory */
(void)nco_msa_var_get_trv(in_id,var_prc[idx],trv_tbl);
/* If re-ordering */
if(IS_REORDER){
if((var_prc_out[idx]->val.vp=(void *)nco_malloc_flg(var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type))) == NULL){
(void)fprintf(fp_stdout,"%s: ERROR Unable to malloc() %ld*%lu bytes for value buffer for variable %s in main()\n",nco_prg_nm_get(),var_prc_out[idx]->sz,(unsigned long)nco_typ_lng(var_prc_out[idx]->type),var_prc_out[idx]->nm);
nco_exit(EXIT_FAILURE);
} /* endif err */
/* Change dimensionality of values */
(void)nco_var_dmn_rdr_val_trv(var_prc[idx],var_prc_out[idx],trv_tbl);
/* Re-ordering requires two value buffers, time to free() input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* IS_REORDER */
/* Edit group name for output */
if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll);
/* Obtain output group ID */
(void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id);
/* Memory management after current extracted group */
if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll);
/* Get variable ID */
(void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id);
/* Store the output variable ID */
var_prc_out[idx]->id=var_out_id;
if(nco_pck_plc != nco_pck_plc_nil){
/* Copy input variable buffer to processed variable buffer */
/* fxm: this is dangerous and leads to double free()'ing variable buffer */
var_prc_out[idx]->val=var_prc[idx]->val;
/* (Un-)Pack variable according to packing specification */
nco_pck_val(var_prc[idx],var_prc_out[idx],nco_pck_map,nco_pck_plc,aed_lst_add_fst+idx,aed_lst_scl_fct+idx);
} /* endif nco_pck_plc != nco_pck_plc_nil */
if(var_trv->ppc != NC_MAX_INT){
if(var_trv->flg_nsd) (void)nco_ppc_bitmask(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); else (void)nco_ppc_around(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val);
} /* endif ppc */
if(nco_is_xcp(var_trv->nm)) nco_xcp_prc(var_trv->nm,var_prc_out[idx]->type,var_prc_out[idx]->sz,(char *)var_prc_out[idx]->val.vp);
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
{ /* begin OpenMP critical */
/* Copy variable to output file then free value buffer */
if(var_prc_out[idx]->nbr_dim == 0){
(void)nco_put_var1(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
}else{ /* end if variable is scalar */
(void)nco_put_vara(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
} /* end if variable is array */
} /* end OpenMP critical */
/* Free current output buffer */
var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp);
} /* end (OpenMP parallel for) loop over idx */
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(fp_stderr,"\n");
/* Write/overwrite packing attributes for newly packed and re-packed variables
Logic here should nearly mimic logic in nco_var_dfn() */
if(nco_pck_plc != nco_pck_plc_nil && nco_pck_plc != nco_pck_plc_upk){
/* ...put file in define mode to allow metadata writing... */
(void)nco_redef(out_id);
/* ...loop through all variables that may have been packed... */
for(idx=0;idx<nbr_var_prc;idx++){
char *grp_out_fll=NULL; /* [sng] Group name */
int grp_out_id; /* [ID] Group ID (output) */
int var_out_id; /* [ID] Variable ID (output) */
trv_sct *var_trv; /* [sct] Variable GTT object */
/* Obtain variable GTT object using full variable name */
var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl);
/* Edit group name for output */
if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll);
/* Obtain output group ID */
(void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id);
/* Memory management after current extracted group */
if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll);
/* Get variable ID */
(void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id);
/* nco_var_dfn() pre-defined dummy packing attributes in output file only for "packable" input variables */
if(nco_pck_plc_typ_get(nco_pck_map,var_prc[idx]->typ_upk,(nc_type *)NULL)){
/* Verify input variable was newly packed by this operator
Writing pre-existing (non-re-packed) attributes here would fail because
nco_pck_dsk_inq() never fills in var->scl_fct.vp and var->add_fst.vp
Logic is same as in nco_var_dfn() (except var_prc[] instead of var[])
If operator newly packed this particular variable... */
if(
/* ...either because operator newly packs all variables... */
(nco_pck_plc == nco_pck_plc_all_new_att && nco_pck_map != nco_pck_map_dbl_flt && nco_pck_map != nco_pck_map_flt_dbl) ||
/* ...or because operator newly packs un-packed variables like this one... */
(nco_pck_plc == nco_pck_plc_all_xst_att && !var_prc[idx]->pck_ram) ||
/* ...or because operator re-packs packed variables like this one... */
(nco_pck_plc == nco_pck_plc_xst_new_att && var_prc[idx]->pck_ram)
){
/* Replace dummy packing attributes with final values, or delete them */
if(nco_dbg_lvl >= nco_dbg_io) (void)fprintf(stderr,"%s: main() replacing dummy packing attribute values for variable %s\n",nco_prg_nm,var_prc[idx]->nm);
(void)nco_aed_prc(grp_out_id,aed_lst_add_fst[idx].id,aed_lst_add_fst[idx]);
(void)nco_aed_prc(grp_out_id,aed_lst_scl_fct[idx].id,aed_lst_scl_fct[idx]);
} /* endif variable is newly packed by this operator */
} /* !nco_pck_plc_alw */
} /* end loop over var_prc */
/* Take output file out of define mode */
if(hdr_pad == 0UL) (void)nco_enddef(out_id); else (void)nco__enddef(out_id,hdr_pad);
} /* nco_pck_plc == nco_pck_plc_nil || nco_pck_plc == nco_pck_plc_upk */
/* Close input netCDF file */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]);
/* Remove local copy of file */
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in);
} /* end loop over fl_idx */
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
/* Clean memory unless dirty memory allowed */
if(flg_mmr_cln){
/* ncpdq-specific memory cleanup */
if(dmn_rdr_nbr > 0){
if(dmn_rdr_nbr_in > 0) dmn_rdr_lst_in=nco_sng_lst_free(dmn_rdr_lst_in,dmn_rdr_nbr_in);
dmn_rvr_rdr=(nco_bool *)nco_free(dmn_rvr_rdr);
/* Free dimension list pointers */
for(idx_rdr=0; idx_rdr<dmn_rdr_nbr_trv; idx_rdr++){
dmn_rdr_trv[idx_rdr]->nm=(char *)nco_free(dmn_rdr_trv[idx_rdr]->nm);
dmn_rdr_trv[idx_rdr]->nm_fll=(char *)nco_free(dmn_rdr_trv[idx_rdr]->nm_fll);
dmn_rdr_trv[idx_rdr]=(dmn_sct *)nco_free(dmn_rdr_trv[idx_rdr]);
}
dmn_rdr_trv=(dmn_sct **)nco_free(dmn_rdr_trv);
/* Dimension structures in dmn_rdr are owned by dmn and dmn_out, free'd later */
} /* endif dmn_rdr_nbr > 0 */
if(nco_pck_plc != nco_pck_plc_nil){
if(nco_pck_plc_sng) nco_pck_plc_sng=(char *)nco_free(nco_pck_plc_sng);
if(nco_pck_map_sng) nco_pck_map_sng=(char *)nco_free(nco_pck_map_sng);
if(nco_pck_plc != nco_pck_plc_upk){
/* No need for loop over var_prc variables to free attribute values
Variable structures and attribute edit lists share same attribute values
Free them only once, and do it in nco_var_free() */
aed_lst_add_fst=(aed_sct *)nco_free(aed_lst_add_fst);
aed_lst_scl_fct=(aed_sct *)nco_free(aed_lst_scl_fct);
} /* nco_pck_plc == nco_pck_plc_upk */
} /* nco_pck_plc == nco_pck_plc_nil */
/* NCO-generic clean-up */
/* Free individual strings/arrays */
if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln);
if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng);
if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng);
if(fl_in) fl_in=(char *)nco_free(fl_in);
if(fl_out) fl_out=(char *)nco_free(fl_out);
if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp);
if(fl_pth) fl_pth=(char *)nco_free(fl_pth);
if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl);
if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr);
/* Free lists of strings */
if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr);
if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1);
if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr);
if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr);
if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr);
/* Free limits */
for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]);
for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]);
for(idx=0;idx<ppc_nbr;idx++) ppc_arg[idx]=(char *)nco_free(ppc_arg[idx]);
/* Free chunking information */
for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]);
if(cnk_nbr > 0 && (fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC)) cnk.cnk_dmn=(cnk_dmn_sct **)nco_cnk_lst_free(cnk.cnk_dmn,cnk_nbr);
if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr);
if(xtr_nbr > 0) var_out=nco_var_lst_free(var_out,xtr_nbr);
var_prc=(var_sct **)nco_free(var_prc);
var_prc_out=(var_sct **)nco_free(var_prc_out);
var_fix=(var_sct **)nco_free(var_fix);
var_fix_out=(var_sct **)nco_free(var_fix_out);
trv_tbl_free(trv_tbl);
for(idx=0;idx<lmt_nbr;idx++) flg_dne[idx].dim_nm=(char *)nco_free(flg_dne[idx].dim_nm);
if(flg_dne) flg_dne=(nco_dmn_dne_t *)nco_free(flg_dne);
if(gpe) gpe=(gpe_sct *)nco_gpe_free(gpe);
} /* !flg_mmr_cln */
#ifdef ENABLE_MPI
MPI_Finalize();
#endif /* !ENABLE_MPI */
/* End timer */
ddra_info.tmr_flg=nco_tmr_end; /* [enm] Timer flag */
rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info);
if(rcd != NC_NOERR) nco_err_exit(rcd,"main");
nco_exit_gracefully();
return EXIT_SUCCESS;
} /* end main() */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/identify.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/magick.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/segment.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-edge measurements for the census-based trim heuristics below.  In
  GetEdgeBoundingBox() one EdgeInfo ("edge") holds, for each side, the
  fraction of pixels on that side that differ from the background color
  (computed by GetEdgeBackgroundCensus), while a second EdgeInfo ("vertex")
  counts how many columns/rows have been trimmed from each side so far.
*/
typedef struct _EdgeInfo
{
  double
    left,
    right,
    top,
    bottom;
} EdgeInfo;
/*
  GetEdgeBackgroundCensus() returns the fraction of pixels in the requested
  edge strip (width x height, offset by x_offset/y_offset, anchored by
  gravity) that differ from the background color.  The background defaults
  to the corner pixel nearest the gravity and may be overridden by the
  "background" or "trim:background-color" artifacts.  Returns 0.0 when the
  edge cannot be read or cropped.
*/
static double GetEdgeBackgroundCensus(const Image *image,
  const CacheView *image_view,const GravityType gravity,const size_t width,
  const size_t height,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  CacheView
    *edge_view;

  const char
    *artifact;

  double
    census;

  Image
    *edge_image;

  PixelInfo
    background,
    pixel;

  RectangleInfo
    edge_geometry;

  const Quantum
    *p;

  ssize_t
    y;

  /*
    Determine the percent of image background for this edge.
  */
  switch (gravity)
  {
    case NorthWestGravity:
    case NorthGravity:
    default:
    {
      p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
      break;
    }
    case NorthEastGravity:
    case EastGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
        exception);
      break;
    }
    case SouthEastGravity:
    case SouthGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
        (ssize_t) image->rows-1,1,1,exception);
      break;
    }
    case SouthWestGravity:
    case WestGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
        exception);
      break;
    }
  }
  if (p == (const Quantum *) NULL)
    return(0.0);  /* fix: corner read can fail; avoid NULL dereference */
  GetPixelInfoPixel(image,p,&background);
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
  artifact=GetImageArtifact(image,"trim:background-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
  edge_geometry.width=width;
  edge_geometry.height=height;
  edge_geometry.x=x_offset;
  edge_geometry.y=y_offset;
  GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
  edge_image=CropImage(image,&edge_geometry,exception);
  if (edge_image == (Image *) NULL)
    return(0.0);
  /*
    Count the pixels in the cropped edge strip that are not fuzzy-equal to
    the background color, then normalize by the strip area.
  */
  census=0.0;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    ssize_t
      x;

    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      GetPixelInfoPixel(edge_image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
        census++;
      p+=GetPixelChannels(edge_image);
    }
  }
  census/=((double) edge_image->columns*edge_image->rows);
  edge_view=DestroyCacheView(edge_view);
  edge_image=DestroyImage(edge_image);
  return(census);
}
static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge)
{
  double
    minimum;

  /*
    Smallest census value among the four image edges; identifies the edge
    most likely to be trimmed next.
  */
  minimum=(edge->left < edge->right) ? edge->left : edge->right;
  minimum=(minimum < edge->top) ? minimum : edge->top;
  minimum=(minimum < edge->bottom) ? minimum : edge->bottom;
  return(minimum);
}
/*
  GetEdgeBoundingBox() computes a trim box by iteratively shaving one-pixel
  edges whose non-background census is below a threshold derived from the
  "trim:percent-background" artifact.  Used by GetImageBoundingBox() when
  that artifact is set.
*/
static RectangleInfo GetEdgeBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *edge_view;

  const char
    *artifact;

  double
    background_census,
    percent_background;

  EdgeInfo
    edge,
    vertex;

  Image
    *edge_image;

  RectangleInfo
    bounds;

  /*
    Get the image bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  SetGeometry(image,&bounds);
  /* clone so the page geometry can be reset without touching the input */
  edge_image=CloneImage(image,0,0,MagickTrue,exception);
  if (edge_image == (Image *) NULL)
    return(bounds);
  (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page);
  (void) memset(&vertex,0,sizeof(vertex));
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  /*
    Initial census of each full-length, one-pixel-wide edge strip.
  */
  edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity,
    1,0,0,0,exception);
  edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity,
    1,0,0,0,exception);
  edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity,
    0,1,0,0,exception);
  edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity,
    0,1,0,0,exception);
  percent_background=1.0;
  artifact=GetImageArtifact(edge_image,"trim:percent-background");
  if (artifact != (const char *) NULL)
    percent_background=StringToDouble(artifact,(char **) NULL)/100.0;
  /*
    Map the artifact percentage onto a stop threshold clamped to
    (MagickEpsilon, 1.0]: lower artifact values require edges to contain
    more foreground before trimming stops.
  */
  percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon),
    1.0);
  background_census=GetMinEdgeBackgroundCensus(&edge);
  /*
    Repeatedly trim whichever edge currently has the minimum census,
    recomputing the censuses of the edges it touches; vertex.{left,right,
    top,bottom} accumulate the number of columns/rows shaved per side.
  */
  for ( ; background_census < percent_background;
          background_census=GetMinEdgeBackgroundCensus(&edge))
  {
    if ((bounds.width == 0) || (bounds.height == 0))
      break;
    if (fabs(edge.left-background_census) < MagickEpsilon)
      {
        /*
          Trim left edge.
        */
        vertex.left++;
        bounds.width--;
        edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
    if (fabs(edge.right-background_census) < MagickEpsilon)
      {
        /*
          Trim right edge.
        */
        vertex.right++;
        bounds.width--;
        edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
    if (fabs(edge.top-background_census) < MagickEpsilon)
      {
        /*
          Trim top edge.
        */
        vertex.top++;
        bounds.height--;
        edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        continue;
      }
    if (fabs(edge.bottom-background_census) < MagickEpsilon)
      {
        /*
          Trim bottom edge.
        */
        vertex.bottom++;
        bounds.height--;
        edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  edge_image=DestroyImage(edge_image);
  /* the trimmed offsets from the left/top become the box origin */
  bounds.x=(ssize_t) vertex.left;
  bounds.y=(ssize_t) vertex.top;
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  return(bounds);
}
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *artifact;

  MagickBooleanType
    status;

  PixelInfo
    target[4],
    zero;

  RectangleInfo
    bounds;

  const Quantum
    *p;

  ssize_t
    y;

  /*
    Return the smallest rectangle enclosing all pixels that differ from the
    corner "target" colors.  When the "trim:percent-background" artifact is
    set, delegate to the census-based edge trimmer instead.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  artifact=GetImageArtifact(image,"trim:percent-background");
  if (artifact != (const char *) NULL)
    return(GetEdgeBoundingBox(image,exception));
  artifact=GetImageArtifact(image, "trim:edges");
  if (artifact == (const char *) NULL)
    {
      /*
        Trim all edges: start from a degenerate box and grow it.
      */
      bounds.width=0;
      bounds.height=0;
      bounds.x=(ssize_t) image->columns;
      bounds.y=(ssize_t) image->rows;
    }
  else
    {
      char
        *edges,
        *p,
        *q;

      /*
        Trim only the edges named in the comma-separated "trim:edges"
        artifact; unnamed edges keep their full extent.
      */
      bounds.width=(size_t) image->columns;
      bounds.height=(size_t) image->rows;
      bounds.x=0;
      bounds.y=0;
      edges=AcquireString(artifact);
      q=edges;
      while ((p=StringToken(",",&q)) != (char *) NULL)
      {
        if (LocaleCompare(p,"north") == 0)
          bounds.y=(ssize_t) image->rows;
        if (LocaleCompare(p,"east") == 0)
          bounds.width=0;
        if (LocaleCompare(p,"south") == 0)
          bounds.height=0;
        if (LocaleCompare(p,"west") == 0)
          bounds.x=(ssize_t) image->columns;
      }
      edges=DestroyString(edges);
    }
  /*
    Sample the four corner pixels; each side of the image is compared
    against the corresponding corner color.
  */
  GetPixelInfo(image,&target[0]);
  image_view=AcquireVirtualCacheView(image,exception);
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const Quantum *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  GetPixelInfoPixel(image,p,&target[0]);
  GetPixelInfo(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  if (p != (const Quantum *) NULL)
    GetPixelInfoPixel(image,p,&target[1]);
  GetPixelInfo(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  if (p != (const Quantum *) NULL)
    GetPixelInfoPixel(image,p,&target[2]);
  /* fix: target[3] was read uninitialized below when this corner read
     failed; initialize it like target[0..2] */
  GetPixelInfo(image,&target[3]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,(ssize_t)
    image->rows-1,1,1,exception);
  if (p != (const Quantum *) NULL)
    GetPixelInfoPixel(image,p,&target[3]);
  status=MagickTrue;
  GetPixelInfo(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    RectangleInfo
      bounding_box;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;  /* snapshot the shared bounds under the lock */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,p,&pixel);
      if ((x < bounding_box.x) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      if ((x < (ssize_t) bounding_box.width) &&
          (y > (ssize_t) bounding_box.height) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[3]) == MagickFalse))
        {
          bounding_box.width=(size_t) x;
          bounding_box.height=(size_t) y;
        }
      p+=GetPixelChannels(image);
    }
    /*
      Merge this row's bounding box into the shared result.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /*
        width/height so far hold the rightmost/bottommost foreground
        coordinates; convert them to extents relative to (x,y).
      */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o n v e x H u l l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageConvexHull() returns the convex hull points of an image canvas.
%
% The format of the GetImageConvexHull method is:
%
% PointInfo *GetImageConvexHull(const Image *image,
% size_t number_vertices,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_vertices: the number of vertices in the convex hull.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c)
{
  double
    first_product,
    second_product;

  /*
    Cross product of the vectors a->b and a->c: positive for a
    counter-clockwise turn, negative for clockwise, zero when collinear.
    Order by x-coordinate, and in case of a tie, by y-coordinate.
  */
  first_product=(b->x-a->x)*(c->y-a->y);
  second_product=(b->y-a->y)*(c->x-a->x);
  return(first_product-second_product);
}
/*
  GetEdgeBackgroundColor() estimates the background color as the seed color
  (corner pixel or "background"/"convex-hull:background-color" artifact)
  whose edge strip contains the most matching... strictly, the seed whose
  strip has the highest count of NON-matching pixels wins the census; the
  winning seed color is returned.
*/
static PixelInfo GetEdgeBackgroundColor(const Image *image,
  const CacheView *image_view,ExceptionInfo *exception)
{
  const char
    *artifact;

  double
    census[4],
    edge_census;

  PixelInfo
    background[4],
    edge_background;

  ssize_t
    i;

  /*
    Most dominant color of edges/corners is the background color of the image.
  */
  artifact=GetImageArtifact(image,"convex-hull:background-color");
  if (artifact == (const char *) NULL)
    artifact=GetImageArtifact(image,"background");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (i=0; i < 4; i++)
  {
    CacheView
      *edge_view;

    GravityType
      gravity;

    Image
      *edge_image;

    PixelInfo
      pixel;

    RectangleInfo
      edge_geometry;

    const Quantum
      *p;

    ssize_t
      y;

    census[i]=0.0;
    (void) memset(&edge_geometry,0,sizeof(edge_geometry));
    /*
      fix: each case below now ends with a break.  Previously the cases fell
      through, so every iteration ended with case 3's pixel, gravity, and
      geometry and all four "edges" sampled the south edge.
    */
    switch (i)
    {
      case 0:
      default:
      {
        p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
          exception);
        gravity=WestGravity;
        edge_geometry.width=1;
        edge_geometry.height=0;
        break;
      }
      case 1:
      {
        p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
          exception);
        gravity=EastGravity;
        edge_geometry.width=1;
        edge_geometry.height=0;
        break;
      }
      case 2:
      {
        p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
        gravity=NorthGravity;
        edge_geometry.width=0;
        edge_geometry.height=1;
        break;
      }
      case 3:
      {
        p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
          (ssize_t) image->rows-1,1,1,exception);
        gravity=SouthGravity;
        edge_geometry.width=0;
        edge_geometry.height=1;
        break;
      }
    }
    /* fix: initialize the seed, then overwrite from the corner pixel only
       when the read succeeded (avoids a NULL dereference) */
    GetPixelInfo(image,background+i);
    if (p != (const Quantum *) NULL)
      GetPixelInfoPixel(image,p,background+i);
    if (artifact != (const char *) NULL)
      (void) QueryColorCompliance(artifact,AllCompliance,background+i,
        exception);
    GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
    edge_image=CropImage(image,&edge_geometry,exception);
    if (edge_image == (Image *) NULL)
      continue;
    edge_view=AcquireVirtualCacheView(edge_image,exception);
    for (y=0; y < (ssize_t) edge_image->rows; y++)
    {
      ssize_t
        x;

      p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,
        exception);
      if (p == (const Quantum *) NULL)
        break;
      for (x=0; x < (ssize_t) edge_image->columns; x++)
      {
        GetPixelInfoPixel(edge_image,p,&pixel);
        if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse)
          census[i]++;
        p+=GetPixelChannels(edge_image);
      }
    }
    edge_view=DestroyCacheView(edge_view);
    edge_image=DestroyImage(edge_image);
  }
  /*
    Select the seed color with the highest census.
  */
  edge_census=(-1.0);
  for (i=0; i < 4; i++)
    if (census[i] > edge_census)
      {
        edge_background=background[i];
        edge_census=census[i];
      }
  return(edge_background);
}
void TraceConvexHull(PointInfo *vertices,size_t number_vertices,
  PointInfo ***monotone_chain,size_t *chain_length)
{
  PointInfo
    **hull;

  size_t
    count,
    lower_limit;

  ssize_t
    j;

  /*
    Monotone-chain convex hull: build the first chain scanning forward over
    the vertices, then the second chain scanning backward, popping any point
    that does not make a strict right turn.  *chain_length receives the
    number of hull pointers written into *monotone_chain.
  */
  hull=(*monotone_chain);
  count=0;
  for (j=0; j < (ssize_t) number_vertices; j++)
  {
    while ((count >= 2) &&
           (LexicographicalOrder(hull[count-2],hull[count-1],
             &vertices[j]) <= 0.0))
      count--;
    hull[count++]=(&vertices[j]);
  }
  /* the second pass must never pop below the first chain's endpoint */
  lower_limit=count+1;
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    while ((count >= lower_limit) &&
           (LexicographicalOrder(hull[count-2],hull[count-1],
             &vertices[j]) <= 0.0))
      count--;
    hull[count++]=(&vertices[j]);
  }
  *chain_length=count;
}
MagickExport PointInfo *GetImageConvexHull(const Image *image,
  size_t *number_vertices,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MemoryInfo
    *monotone_info,
    *vertices_info;

  PixelInfo
    background;

  PointInfo
    *convex_hull,
    **monotone_chain,
    *vertices;

  size_t
    n;

  ssize_t
    y;

  /*
    Identify convex hull vertices of image foreground object(s).  Returns a
    newly allocated PointInfo array (caller frees) or NULL on allocation
    failure; *number_vertices receives the hull length.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *number_vertices=0;
  /*
    Worst case every pixel is a candidate vertex; the monotone chain needs
    up to twice that many pointer slots.
  */
  vertices_info=AcquireVirtualMemory(image->columns,image->rows*
    sizeof(*vertices));
  monotone_info=AcquireVirtualMemory(2*image->columns,2*
    image->rows*sizeof(*monotone_chain));
  if ((vertices_info == (MemoryInfo *) NULL) ||
      (monotone_info == (MemoryInfo *) NULL))
    {
      if (monotone_info != (MemoryInfo *) NULL)
        monotone_info=(MemoryInfo *) RelinquishVirtualMemory(monotone_info);
      if (vertices_info != (MemoryInfo *) NULL)
        vertices_info=RelinquishVirtualMemory(vertices_info);
      return((PointInfo *) NULL);
    }
  vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info);
  monotone_chain=(PointInfo **) GetVirtualMemoryBlob(monotone_info);
  image_view=AcquireVirtualCacheView(image,exception);
  background=GetEdgeBackgroundColor(image,image_view,exception);
  status=MagickTrue;
  n=0;
  /*
    Collect the coordinates of every non-background pixel in raster order.
    NOTE(review): the monotone-chain scan below consumes these as-is; raster
    order sorts by y before x -- confirm this matches the intended
    lexicographical ordering for TraceConvexHull.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;  /* cache failure: skip all remaining rows */
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      GetPixelInfoPixel(image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
        {
          vertices[n].x=(double) x;
          vertices[n].y=(double) y;
          n++;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Return the convex hull of the image foreground object(s).
  */
  TraceConvexHull(vertices,n,&monotone_chain,number_vertices);
  convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices,
    sizeof(*convex_hull));
  if (convex_hull != (PointInfo *) NULL)
    for (n=0; n < *number_vertices; n++)
      convex_hull[n]=(*monotone_chain[n]);
  monotone_info=RelinquishVirtualMemory(monotone_info);
  vertices_info=RelinquishVirtualMemory(vertices_info);
  return(convex_hull);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDepth() returns the depth of a particular image channel.
%
% The format of the GetImageDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
i;
size_t
*current_depth,
depth,
number_threads;
ssize_t
y;
/*
Compute image depth.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
current_depth=(size_t *) AcquireQuantumMemory(number_threads,
sizeof(*current_depth));
if (current_depth == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
status=MagickTrue;
for (i=0; i < (ssize_t) number_threads; i++)
current_depth[i]=1;
if ((image->storage_class == PseudoClass) &&
(image->alpha_trait == UndefinedPixelTrait))
{
for (i=0; i < (ssize_t) image->colors; i++)
{
const int
id = GetOpenMPThreadId();
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickBooleanType
atDepth;
QuantumAny
range;
atDepth=MagickTrue;
range=GetQuantumRange(current_depth[id]);
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse))
break;
current_depth[id]++;
}
}
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
if ((1UL*QuantumRange) <= MaxMap)
{
size_t
*depth_map;
/*
Scale pixels to desired (optimized with depth map).
*/
depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
{
unsigned int
depth;
for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
{
Quantum
pixel;
QuantumAny
range;
range=GetQuantumRange(depth);
pixel=(Quantum) i;
if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
break;
}
depth_map[i]=depth;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(p[i])];
}
p+=GetPixelChannels(image);
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
depth_map=(size_t *) RelinquishMagickMemory(depth_map);
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
#endif
/*
Compute pixel depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
QuantumAny
range;
range=GetQuantumRange(current_depth[id]);
if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range))
break;
current_depth[id]++;
}
}
p+=GetPixelChannels(image);
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M i n i m u m B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMinimumBoundingBox() returns the points that form the minimum
% bounding box around the image foreground objects with the "Rotating
% Calipers" algorithm. The method also returns these properties:
% minimum-bounding-box:area, minimum-bounding-box:width,
% minimum-bounding-box:height, and minimum-bounding-box:angle.
%
% The format of the GetImageMinimumBoundingBox method is:
%
% PointInfo *GetImageMinimumBoundingBox(Image *image,
%      PointInfo *GetImageMinimumBoundingBox(Image *image,
%        size_t *number_vertices,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_vertices: the number of vertices in the bounding box.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Rotating-calipers state: the best (smallest-area) candidate bounding box
  found so far, plus the convex-hull vertex indices that produced it.
*/
typedef struct _CaliperInfo
{
  double
    area,        /* area of the candidate box */
    width,       /* caliper width (Feret diameter of the supporting edge) */
    height,      /* extent of the hull projected along the supporting edge */
    projection;  /* maximum projection onto the supporting edge */

  ssize_t
    p,  /* hull index of the first vertex of the supporting edge */
    q,  /* hull index of the second vertex of the supporting edge */
    v;  /* hull index of the vertex farthest from edge (p,q) */
} CaliperInfo;
static inline double getAngle(PointInfo *p,PointInfo *q)
{
  /*
    Angle, in degrees, between the line through p and q and the horizontal
    axis.
  */
  double
    delta_x,
    delta_y;

  delta_x=q->x-p->x;
  delta_y=q->y-p->y;
  return(RadiansToDegrees(atan2(delta_y,delta_x)));
}
static inline double getDistance(PointInfo *p,PointInfo *q)
{
  /*
    Squared Euclidean distance between p and q.  Note the square: callers
    rely on getting distance*distance, not the distance itself.
  */
  double
    length;

  length=hypot(p->x-q->x,p->y-q->y);
  return(length*length);
}
static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v)
{
  /*
    Scalar projection of the vector (v-p) onto the line through p and q;
    INFINITY when p and q coincide (degenerate edge).
  */
  double
    distance,
    dot_product;

  distance=getDistance(p,q);
  if (distance < MagickEpsilon)
    return(INFINITY);
  dot_product=(q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y);
  return(dot_product/sqrt(distance));
}
static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v)
{
  /*
    Signed perpendicular distance from point v to the line through p and q
    (2-D cross product over the edge length); INFINITY when p and q
    coincide.
  */
  double
    cross_product,
    distance;

  distance=getDistance(p,q);
  if (distance < MagickEpsilon)
    return(INFINITY);
  cross_product=(q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y);
  return(cross_product/sqrt(distance));
}
MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image,
  size_t *number_vertices,ExceptionInfo *exception)
{
  CaliperInfo
    caliper_info;

  const char
    *artifact;

  double
    angle,
    diameter,
    distance;

  PointInfo
    *bounding_box,
    *vertices;

  ssize_t
    i;

  size_t
    number_hull_vertices;

  /*
    Generate the minimum bounding box with the "Rotating Calipers" algorithm.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *number_vertices=0;
  vertices=GetImageConvexHull(image,&number_hull_vertices,exception);
  if (vertices == (PointInfo *) NULL)
    return((PointInfo *) NULL);
  /*
    The result is always a quadrilateral; caller owns the returned memory.
  */
  *number_vertices=4;
  bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices,
    sizeof(*bounding_box));
  if (bounding_box == (PointInfo *) NULL)
    {
      vertices=(PointInfo *) RelinquishMagickMemory(vertices);
      return((PointInfo *) NULL);
    }
  /*
    Seed the caliper state with upper bounds so any real candidate wins.
  */
  caliper_info.area=2.0*image->columns*image->rows;
  caliper_info.width=(double) image->columns+image->rows;
  caliper_info.height=0.0;
  caliper_info.projection=0.0;
  caliper_info.p=(-1);
  caliper_info.q=(-1);
  caliper_info.v=(-1);
  for (i=0; i < (ssize_t) number_hull_vertices; i++)
  {
    double
      area = 0.0,
      max_projection = 0.0,
      min_diameter = -1.0,  /* NOTE(review): actually tracks the MAX Feret
                               diameter for this edge; name is historical */
      min_projection = 0.0;

    ssize_t
      j,
      k;

    ssize_t
      p = -1,
      q = -1,
      v = -1;

    /*
      For hull edge (i,i+1), find the vertex farthest from the edge.
    */
    for (j=0; j < (ssize_t) number_hull_vertices; j++)
    {
      double
        diameter;

      diameter=fabs(getFeretDiameter(&vertices[i],
        &vertices[(i+1) % number_hull_vertices],&vertices[j]));
      if (min_diameter < diameter)
        {
          min_diameter=diameter;
          p=i;
          q=(i+1) % number_hull_vertices;
          v=j;
        }
    }
    /*
      Project every hull vertex onto the supporting edge to get its extent.
    */
    for (k=0; k < (ssize_t) number_hull_vertices; k++)
    {
      double
        projection;

      /*
        Rotating calipers.
      */
      projection=getProjection(&vertices[p],&vertices[q],&vertices[k]);
      min_projection=MagickMin(min_projection,projection);
      max_projection=MagickMax(max_projection,projection);
    }
    area=min_diameter*(max_projection-min_projection);
    if (caliper_info.area > area)
      {
        /*
          Keep the smallest-area candidate seen so far.
        */
        caliper_info.area=area;
        caliper_info.width=min_diameter;
        caliper_info.height=max_projection-min_projection;
        caliper_info.projection=max_projection;
        caliper_info.p=p;
        caliper_info.q=q;
        caliper_info.v=v;
      }
  }
  /*
    Initialize minimum bounding box.
  */
  diameter=getFeretDiameter(&vertices[caliper_info.p],
    &vertices[caliper_info.q],&vertices[caliper_info.v]);
  angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y,
    vertices[caliper_info.q].x-vertices[caliper_info.p].x);
  bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)*
    caliper_info.projection;
  bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)*
    caliper_info.projection;
  bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  /*
    Export minimum bounding box properties.
  */
  (void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g",
    GetMagickPrecision(),caliper_info.area);
  (void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g",
    GetMagickPrecision(),caliper_info.width);
  (void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g",
    GetMagickPrecision(),caliper_info.height);
  (void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.p].x,
    GetMagickPrecision(),vertices[caliper_info.p].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.q].x,
    GetMagickPrecision(),vertices[caliper_info.q].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.v].x,
    GetMagickPrecision(),vertices[caliper_info.v].y);
  /*
    Find smallest angle to origin.
  */
  distance=hypot(bounding_box[0].x,bounding_box[0].y);
  angle=getAngle(&bounding_box[0],&bounding_box[1]);
  for (i=1; i < 4; i++)
  {
    double d = hypot(bounding_box[i].x,bounding_box[i].y);
    if (d < distance)
      {
        distance=d;
        angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]);
      }
  }
  artifact=GetImageArtifact(image,"minimum-bounding-box:orientation");
  if (artifact != (const char *) NULL)
    {
      double
        length,
        q_length,
        p_length;

      PointInfo
        delta,
        point;

      /*
        Find smallest perpendicular distance from edge to origin.
      */
      point=bounding_box[0];
      for (i=1; i < 4; i++)
      {
        if (bounding_box[i].x < point.x)
          point.x=bounding_box[i].x;
        if (bounding_box[i].y < point.y)
          point.y=bounding_box[i].y;
      }
      /*
        Translate the box so its minimum corner sits at the origin.
      */
      for (i=0; i < 4; i++)
      {
        bounding_box[i].x-=point.x;
        bounding_box[i].y-=point.y;
      }
      for (i=0; i < 4; i++)
      {
        double
          d,
          intercept,
          slope;

        delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x;
        delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y;
        slope=delta.y*PerceptibleReciprocal(delta.x);
        intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x;
        d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)*
          PerceptibleReciprocal(sqrt(slope*slope+1.0)));
        if ((i == 0) || (d < distance))
          {
            distance=d;
            point=delta;
          }
      }
      angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x)));
      length=hypot(point.x,point.y);
      /*
        Compare the closest edge's length with the box's long and short
        sides to decide which orientation it represents.
      */
      p_length=fabs((double) MagickMax(caliper_info.width,caliper_info.height)-
        length);
      q_length=fabs(length-(double) MagickMin(caliper_info.width,
        caliper_info.height));
      if (LocaleCompare(artifact,"landscape") == 0)
        {
          if (p_length > q_length)
            angle+=(angle < 0.0) ? 90.0 : -90.0;
        }
      else
        if (LocaleCompare(artifact,"portrait") == 0)
          {
            if (p_length < q_length)
              angle+=(angle >= 0.0) ? 90.0 : -90.0;
          }
    }
  (void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g",
    GetMagickPrecision(),angle);
  (void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g",
    GetMagickPrecision(),-angle);
  vertices=(PointInfo *) RelinquishMagickMemory(vertices);
  return(bounding_box);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, 32, or 64.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  /*
    Round the image depth up to the next legal quantum depth (8, 16, 32,
    or 64); optionally constrain the result to MAGICKCORE_QUANTUM_DEPTH.
    A depth above 64 is returned unchanged, matching the original chain.
  */
  static const size_t
    legal_depths[] = { 8, 16, 32, 64 };

  size_t
    depth,
    i;

  depth=image->depth;
  for (i=0; i < sizeof(legal_depths)/sizeof(legal_depths[0]); i++)
    if (depth <= legal_depths[i])
      {
        depth=legal_depths[i];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ImageType GetImageType(const Image *image)
{
  /*
    Classify the image from its cached attributes: colorspace, cached
    monochrome/gray/palette checks, and alpha trait.
  */
  MagickBooleanType
    has_alpha;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  has_alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    return(has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IsPaletteImage(image) != MagickFalse)
    return(has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageGray() returns grayscale if all the pixels in the image have
% the same red, green, and blue intensities, and bi-level if the intensity is
% either 0 or QuantumRange. Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
% ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  const Quantum
    *p;

  ssize_t
    x;

  ssize_t
    y;

  /*
    Inspect every pixel: start by assuming bi-level, downgrade to grayscale
    at the first gray-but-not-monochrome pixel, and stop with UndefinedType
    at the first non-gray pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Trust the cached image type when it already asserts gray.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;  /* pixel cache failure: fall through with current type */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(image,p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) &&
          (IsPixelMonochrome(image,p) == MagickFalse))
        type=GrayscaleType;
      p+=GetPixelChannels(image);
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
    type=GrayscaleAlphaType;
  return(type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
% MagickBooleanType IdentifyImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    bilevel;

  ssize_t
    x;

  const Quantum
    *p;

  ssize_t
    y;

  /*
    Scan every pixel; stop at the first pixel that is not pure black or
    pure white (per IsPixelMonochrome).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Trust the cached type when it already asserts bi-level.
  */
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  bilevel=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;  /* pixel cache failure: report current verdict */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(image,p) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
      p+=GetPixelChannels(image);
    }
    if (bilevel == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  return(bilevel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
% ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Determine the image's potential type by inspecting its pixels, unlike
    GetImageType() which only consults cached attributes.
  */
  MagickBooleanType
    has_alpha;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  has_alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    return(has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    return(has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageGray() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsImageGray method is:
%
% MagickBooleanType IsImageGray(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
  /*
    Report whether the cached image type is grayscale or bi-level; no
    pixel inspection is performed (see IdentifyImageGray for that).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (image->type)
  {
    case BilevelType:
    case GrayscaleType:
    case GrayscaleAlphaType:
      return(MagickTrue);
    default:
      return(MagickFalse);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageMonochrome() returns MagickTrue if type of the image is bi-level.
%
% The format of the IsImageMonochrome method is:
%
% MagickBooleanType IsImageMonochrome(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  /*
    Report whether the cached image type is bi-level; no pixel inspection
    is performed (see IdentifyImageMonochrome for that).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  return(image->type == BilevelType ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O p a q u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageOpaque() returns MagickTrue if none of the pixels in the image have
% an alpha value other than OpaqueAlpha (QuantumRange).
%
% Returns MagickTrue immediately if the alpha channel is not available.
%
% The format of the IsImageOpaque method is:
%
% MagickBooleanType IsImageOpaque(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const Quantum
    *p;

  ssize_t
    x;

  ssize_t
    y;

  /*
    Determine if image is opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Without an alpha channel, every pixel is opaque by definition.
  */
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelAlpha(image,p) != OpaqueAlpha)
        break;
      p+=GetPixelChannels(image);
    }
    if (x < (ssize_t) image->columns)
      break;  /* early exit: a non-opaque pixel was found on this row */
  }
  image_view=DestroyCacheView(image_view);
  /*
    y stops short of image->rows only when a non-opaque pixel (or a pixel
    cache failure) interrupted the scan.
  */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageDepth() sets the depth of the image.
%
% The format of the SetImageDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Requests at or above the build-time quantum depth need no requantizing;
    just record the new depth.
  */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      /*
        Requantize the colormap entries, channel by channel.
        NOTE(review): the pragma lists status as shared but this loop never
        reads or writes it — confirm intent.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Non-HDRI fast path: quantum values are integers small enough to index
    a precomputed lookup table.
  */
  if ((1UL*QuantumRange) <= MaxMap)
    {
      Quantum
        *depth_map;

      ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;  /* another row already failed; skip remaining work */
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=depth_map[ScaleQuantumToMap(q[i])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
          q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
% OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  /*
    Let the "dither" artifact override the image's dither setting for any
    quantization below.
  */
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /*
        Gray colorspace, normalized, then quantized to 2 colors.
        NOTE(review): the first status is overwritten by QuantizeImage's —
        a TransformImageColorspace failure is not reported.
      */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      (void) NormalizeImage(image,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      /*
        Quantize only when the image is not already a small colormap.
      */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      /*
        Threshold only the alpha channel, then quantize the colors.
      */
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(status);
  /*
    Record the new type only on success.
  */
  image->type=type;
  return(MagickTrue);
}
|
rand.c | /* Copyright 2013. The Regents of the University of California.
* Copyright 2021. Uecker Lab. University Center Göttingen.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors: Martin Uecker, Dara Bahri, Moritz Blumenthal
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <math.h>
#include <complex.h>
#ifdef _WIN32
#include "win/rand_r.h"
#endif
#include "num/multind.h"
#ifdef USE_CUDA
#include "num/gpuops.h"
#endif
#include "rand.h"
/* Global PRNG seed shared by every rand_r()-based generator below. */
unsigned int num_rand_seed = 123;

/*
 * Re-seed the pseudo-random number generator.
 * Not thread-safe with respect to concurrent uniform_rand() callers.
 */
void num_rand_init(unsigned int seed)
{
	num_rand_seed = seed;
}
/*
 * Uniformly distributed random double in [0, 1].
 *
 * rand_r() mutates the global seed, so the draw is serialized with an
 * OpenMP critical section (the pragma guards exactly the one statement
 * that follows it) to keep concurrent callers from racing on the seed.
 */
double uniform_rand(void)
{
	double ret;

	#pragma omp critical
	ret = rand_r(&num_rand_seed) / (double)RAND_MAX;

	return ret;
}
/**
 * Box-Muller (Marsaglia polar variant): returns a complex double whose
 * real and imaginary parts are independent standard normal deviates.
 */
complex double gaussian_rand(void)
{
	double u1, u2, s, f;

	/* Draw points uniformly from the square until one falls strictly
	 * inside the unit circle.  s == 0 must be rejected: it would yield
	 * log(0)/0 = NaN below.  s >= 1 is rejected because the polar
	 * method requires 0 < s < 1 (the old test `s > 1.` let both
	 * degenerate cases through). */
	do {
		u1 = 2. * uniform_rand() - 1.;
		u2 = 2. * uniform_rand() - 1.;
		s = u1 * u1 + u2 * u2;

	} while ((s >= 1.) || (s == 0.));

	f = sqrt(-2. * log(s) / s);

	return f * u1 + 1.i * (f * u2);
}
/*
 * Fill dst (an md-array described by dims[D]) with complex Gaussian samples.
 *
 * For a CUDA device pointer, the samples are generated into a host buffer
 * and copied over, so CPU and GPU results come from the same stream.
 */
void md_gaussian_rand(unsigned int D, const long dims[D], complex float* dst)
{
#ifdef USE_CUDA
	if (cuda_ondevice(dst)) {

		complex float* tmp = md_alloc(D, dims, sizeof(complex float));
		md_gaussian_rand(D, dims, tmp);
		md_copy(D, dims, dst, tmp, sizeof(complex float));
		md_free(tmp);
		return;
	}
#endif
	/* Kept serial: parallelizing would change the deterministic
	 * seed-dependent sample order. */
	//#pragma omp parallel for
	for (long i = 0; i < md_calc_size(D, dims); i++)
		dst[i] = (complex float)gaussian_rand();
}
/* Fill an md-array with uniform samples from [0, 1] (real part only). */
void md_uniform_rand(unsigned int D, const long dims[D], complex float* dst)
{
#ifdef USE_CUDA
	/* device memory: draw into a host buffer, then transfer */
	if (cuda_ondevice(dst)) {

		complex float* hostbuf = md_alloc(D, dims, sizeof(complex float));

		md_uniform_rand(D, dims, hostbuf);
		md_copy(D, dims, dst, hostbuf, sizeof(complex float));
		md_free(hostbuf);
		return;
	}
#endif
	long N = md_calc_size(D, dims);

	for (long i = 0; i < N; i++)
		dst[i] = (complex float)uniform_rand();
}
/* Fill an md-array with independent Bernoulli(p) samples (1.f or 0.f). */
void md_rand_one(unsigned int D, const long dims[D], complex float* dst, double p)
{
#ifdef USE_CUDA
	/* device memory: draw into a host buffer, then transfer */
	if (cuda_ondevice(dst)) {

		complex float* hostbuf = md_alloc(D, dims, sizeof(complex float));

		md_rand_one(D, dims, hostbuf, p);
		md_copy(D, dims, dst, hostbuf, sizeof(complex float));
		md_free(hostbuf);
		return;
	}
#endif
	long N = md_calc_size(D, dims);

	for (long i = 0; i < N; i++)
		dst[i] = (complex float)(uniform_rand() < p);
}
|
17_blur_parallel_best.c | #include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <omp.h>
#define NX 1002
#define NY 1002
/* In-place 4-neighbour box blur of `image` (szx rows of szy ints, row-major
 * with iy the fast index), repeated `iters` times. The one-cell border is
 * left untouched. Aborts on allocation failure. */
void blur(int *image, size_t szx, size_t szy, size_t iters){
  int *temp = malloc(sizeof *temp * szx * szy);
  if (temp == NULL) {
    fprintf(stderr, "blur: out of memory\n");
    exit(EXIT_FAILURE);
  }
  /* Seed temp with the input so border cells stay valid on copy-back.
   * BUGFIX: the original copied NX*NY elements (compile-time macros),
   * which is wrong whenever szx/szy differ from NX/NY. */
  memcpy(temp, image, sizeof *temp * szx * szy);
  #pragma omp parallel
  {
    for (size_t iit = 0; iit < iters; ++iit){
      #pragma omp for
      for (size_t ix = 1; ix < szx-1; ++ix){
        for (size_t iy = 1; iy < szy-1; ++iy){
          temp[iy + ix * szy] = (int)(0.25 * (float)(image[iy + (ix+1) * szy] +
              image[iy + (ix-1) * szy] + image[(iy-1) + ix * szy] +
              image[(iy+1) + ix * szy]) + 0.5);
        }
      }
      /* the implicit barrier of "omp for" orders the stencil before the copy */
      #pragma omp for
      for (size_t i = 0; i < (szx * szy); ++i){
        image[i] = temp[i];
      }
    }
  }
  free(temp);
}
/* Driver: blur a constant 1002x1002 image 10000 times and report timing. */
int main(void){
  /* BUGFIX: ~4 MB of image data was an automatic (stack) array, which
   * overflows common 1-8 MB stack limits; `static` moves it to static
   * storage and also zero-initializes it. */
  static int image[(NX)*(NY)];
  struct timespec t1, t2;
  float dtime;
  for (size_t i = 0; i < NX*NY; ++i) image[i] = 5;
  printf("OpenMP code running on %i threads\n", omp_get_max_threads());
  clock_gettime(CLOCK_REALTIME, &t1);
  blur(image, NX, NY, 10000);
  clock_gettime(CLOCK_REALTIME, &t2);
  dtime = (float)(t2.tv_sec - t1.tv_sec) + ((float)(t2.tv_nsec - t1.tv_nsec)
      /1.0e9);
  printf("Time taken was %f seconds\n", dtime);
  /* read a value so the compiler cannot optimize the blur away */
  printf("Arbitrary value from image %i\n", image[100]);
  printf("Arbitrary value printed to avoid compiler optimising the blur out\n");
  return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Compute *result = *x - *y for struct timeval values (glibc manual idiom).
 *
 * NOTE: *y is normalized in place as scratch space.
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that 0 <= x->tv_usec - y->tv_usec < ~1000000. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec is now certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Driver for the order-2, 3D 25-point wave-equation stencil, with
 * PLUTO/CLooG time-tiled loops. argv: Nx Ny Nz [Nt]. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  // NOTE(review): Nx/Ny/Nz (and Nt) stay uninitialized when fewer than
  // 3 (resp. 4) arguments are supplied; the code below then uses
  // indeterminate sizes. Callers are expected to pass all four.
  if (argc > 3) {
    // +8: four halo layers per side for the radius-4 (25-point) stencil
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // A[2][Nz][Ny][Nx]: two time planes ping-ponged via t % 2;
  // roc2 holds the per-cell wave-speed-squared coefficient.
  double ****A = (double ****) malloc(sizeof(double***)*2);
  // NOTE(review): this first roc2 allocation is immediately overwritten
  // below and leaks.
  double ***roc2 = (double ***) malloc(sizeof(double**));
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 8;
  tile_size[3] = 1024;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables
  //
  // NOTE(review): the init loops start at index 1, so plane/row/column 0
  // keeps indeterminate (though allocated) values.
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  // radius-4 central-difference Laplacian coefficients
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation. It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header. GCC knows the name of this
       header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex. If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */
    /* We do not support C11 <threads.h>. */
    // Machine-generated (CLooG) time-tiled loop nest below; do not edit
    // by hand. t1..t4 iterate tiles, t5..t8 iterate points within a tile.
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,3);t1++) {
        lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
        ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
        // parallelize over the t2 (z-tile) dimension
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(0,ceild(3*t1,2)),ceild(24*t2-Nz+5,8)),3*t1-3*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(12*t1+Ny+15,8)),floord(24*t2+Ny+11,8)),floord(24*t1-24*t2+Nz+Ny+13,8));t3++) {
            for (t4=max(max(max(max(0,ceild(3*t1-3*t2-126,128)),ceild(3*t1-254,256)),ceild(24*t2-Nz-1011,1024)),ceild(8*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(12*t1+Nx+15,1024)),floord(24*t2+Nx+11,1024)),floord(8*t3+Nx-5,1024)),floord(24*t1-24*t2+Nz+Nx+13,1024));t4++) {
              for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),2*t3),Nt-1),3*t1+5),6*t2+4),256*t4+254);t5++) {
                for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
                    lbv=max(1024*t4,4*t5+4);
                    ubv=min(1024*t4+1023,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    // leapfrog update of the wave equation: next = 2*cur - prev + roc2 * Laplacian(cur)
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  return 0;
}
|
beta_projectors.h | // Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file beta_projectors.h
*
* \brief Contains declaration and implementation of sirius::Beta_projectors class.
*/
#ifndef __BETA_PROJECTORS_H__
#define __BETA_PROJECTORS_H__
#include "communicator.hpp"
#include "Unit_cell/unit_cell.h"
#include "wave_functions.hpp"
#include "simulation_context.h"
#include "beta_projectors_base.h"
namespace sirius {
/// Stores <G+k | beta> expansion
class Beta_projectors: public Beta_projectors_base<1>
{
  protected:

    /// Generate plane-wave coefficients for beta-projectors of atom types.
    /** Fills pw_coeffs_t_[0](igkloc, xi) with
     *  i^{-l} * (4pi/sqrt(omega)) * R_lm(G+k) * <j_l|beta_idxrf>
     *  for each local G+k vector and each beta-projector basis function.
     *  \param igk__  mapping from local G+k index to global G+k index. */
    void generate_pw_coefs_t(std::vector<int>& igk__)
    {
        PROFILE("sirius::Beta_projectors::generate_pw_coefs_t");
        if (!num_beta_t()) {
            return;
        }
        auto& comm = gkvec_.comm();
        auto& beta_radial_integrals = ctx_.beta_ri();
        /* z[l] = i^{-l} * 4pi / sqrt(omega): l-dependent prefactor */
        std::vector<double_complex> z(ctx_.unit_cell().lmax() + 1);
        for (int l = 0; l <= ctx_.unit_cell().lmax(); l++) {
            z[l] = std::pow(double_complex(0, -1), l) * fourpi / std::sqrt(ctx_.unit_cell().omega());
        }
        /* compute <G+k|beta> */
        #pragma omp parallel for
        for (int igkloc = 0; igkloc < num_gkvec_loc(); igkloc++) {
            int igk = igk__[igkloc];
            /* vs = {r, theta, phi} */
            auto vs = SHT::spherical_coordinates(gkvec_.gkvec_cart(igk));
            /* compute real spherical harmonics for G+k vector */
            std::vector<double> gkvec_rlm(Utils::lmmax(ctx_.unit_cell().lmax()));
            SHT::spherical_harmonics(ctx_.unit_cell().lmax(), vs[1], vs[2], &gkvec_rlm[0]);
            for (int iat = 0; iat < ctx_.unit_cell().num_atom_types(); iat++) {
                auto& atom_type = ctx_.unit_cell().atom_type(iat);
                /* get all values of radial integrals interpolated at |G+k| */
                auto ri_val = beta_radial_integrals.values(iat, vs[0]);
                for (int xi = 0; xi < atom_type.mt_basis_size(); xi++) {
                    int l = atom_type.indexb(xi).l;
                    int lm = atom_type.indexb(xi).lm;
                    int idxrf = atom_type.indexb(xi).idxrf;
                    // NOTE(review): column offset uses offset_lo(); presumably
                    // the local-orbital offset doubles as the beta offset here
                    // -- confirm against Beta_projectors_base.
                    pw_coeffs_t_[0](igkloc, atom_type.offset_lo() + xi) = z[l] * gkvec_rlm[lm] * ri_val(idxrf);
                }
            }
        }
        /* optional cross-rank checksum for debugging */
        if (ctx_.control().print_checksum_) {
            auto c1 = pw_coeffs_t_[0].checksum();
            comm.allreduce(&c1, 1);
            if (comm.rank() == 0) {
                print_checksum("beta_pw_coeffs_t", c1);
            }
        }
        if (ctx_.processing_unit() == GPU) {
            /* beta projectors for atom types will be stored on GPU for the entire run */
            reallocate_pw_coeffs_t_on_gpu_ = false;
            pw_coeffs_t_[0].allocate(memory_t::device);
            pw_coeffs_t_[0].copy<memory_t::host, memory_t::device>();
        }
    }

  public:
    /// Construct and immediately generate the atom-type PW coefficients.
    Beta_projectors(Simulation_context& ctx__,
                    Gvec const& gkvec__,
                    std::vector<int>& igk__)
        : Beta_projectors_base<1>(ctx__, gkvec__, igk__)
    {
        PROFILE("sirius::Beta_projectors::Beta_projectors");
        generate_pw_coefs_t(igk__);
    }

    /// Generate beta-projectors for a chunk of atoms (component 0).
    void generate(int chunk__)
    {
        Beta_projectors_base<1>::generate(chunk__, 0);
    }
};
} // namespace
#endif
|
watchpoint_support.c | //
// WatchPointDriver.cpp
//
//
// Created by Milind Chabbi on 2/21/17.
//
//
#if !defined(_GNU_SOURCE)
#define _GNU_SOURCE
#endif
#include <asm/unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/kernel.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <time.h>
#include <ucontext.h>
#include <unistd.h>
#include <sys/mman.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <strings.h>
#include <asm/prctl.h>
#include <sys/prctl.h>
#include "common.h"
#include <hpcrun/main.h>
#include <hpcrun/hpcrun_options.h>
#include <hpcrun/write_data.h>
#include <hpcrun/safe-sampling.h>
#include <hpcrun/hpcrun_stats.h>
#include <hpcrun/memory/mmap.h>
#include <hpcrun/cct/cct.h>
#include <hpcrun/metrics.h>
#include <hpcrun/sample_event.h>
#include <hpcrun/sample_sources_registered.h>
#include <hpcrun/thread_data.h>
#include <hpcrun/trace.h>
#include <lush/lush-backtrace.h>
#include <messages/messages.h>
#include <utilities/tokenize.h>
#include <utilities/arch/context-pc.h>
#include <unwind/common/unwind.h>
#include "watchpoint_support.h"
#include <unwind/x86-family/x86-misc.h>
#define MAX_WP_SLOTS (5)
#define IS_ALIGNED(address, alignment) (! ((size_t)(address) & (alignment-1)))
#define ADDRESSES_OVERLAP(addr1, len1, addr2, len2) (((addr1)+(len1) > (addr2)) && ((addr2)+(len2) > (addr1) ))
#define CACHE_LINE_SIZE (64)
//#define ALT_STACK_SZ (4 * SIGSTKSZ)
#define ALT_STACK_SZ ((1L<<20) > 4 * SIGSTKSZ? (1L<<20): 4* SIGSTKSZ)
//#define TEST
#ifdef TEST
#define EMSG(...) fprintf(stderr, __VA_ARGS__)
#define hpcrun_abort() abort()
#define hpcrun_safe_exit() (1)
#define hpcrun_safe_enter() (1)
#define hpcrun_context_pc(context) (0)
#define get_previous_instruction(ip, pip) (0)
#define get_mem_access_length_and_type(a, b, c) (0)
#endif
#if defined(PERF_EVENT_IOC_UPDATE_BREAKPOINT)
#define FAST_BP_IOC_FLAG (PERF_EVENT_IOC_UPDATE_BREAKPOINT)
#elif defined(PERF_EVENT_IOC_MODIFY_ATTRIBUTES)
#define FAST_BP_IOC_FLAG (PERF_EVENT_IOC_MODIFY_ATTRIBUTES)
#else
#endif
#define CHECK(x) ({int err = (x); \
if (err) { \
EMSG("%s: Failed with %d on line %d of file %s\n", strerror(errno), err, __LINE__, __FILE__); \
monitor_real_abort(); }\
err;})
#define HANDLE_ERROR_IF_ANY(val, expected, errstr) {if (val != expected) {perror(errstr); abort();}}
#define SAMPLES_POST_FULL_RESET_VAL (1)
WPConfig_t wpConfig;
//const WatchPointInfo_t dummyWPInfo = {.sample = {}, .startTime =0, .fileHandle= -1, .isActive= false, .mmapBuffer=0};
//const struct DUMMY_WATCHPOINT dummyWP[MAX_WP_SLOTS];
// Data structure that is given by clients to set a WP
typedef struct ThreadData{
int lbrDummyFD __attribute__((aligned(CACHE_LINE_SZ)));
stack_t ss;
void * fs_reg_val;
void * gs_reg_val;
long numWatchpointTriggers;
long numWatchpointImpreciseIP;
long numWatchpointImpreciseAddressArbitraryLength;
long numWatchpointImpreciseAddress8ByteLength;
long numSampleTriggeringWatchpoints;
long numWatchpointDropped;
long numInsaneIP;
struct drand48_data randBuffer;
WatchPointInfo_t watchPointArray[MAX_WP_SLOTS];
WatchPointUpCall_t fptr;
char dummy[CACHE_LINE_SZ];
} ThreadData_t;
static __thread ThreadData_t tData;
// Returns true iff `addr` lies within this thread's alternate signal stack.
bool IsAltStackAddress(void *addr){
    // Cast to char*: arithmetic and relational comparison on void* is a
    // GNU extension, not standard C.
    char * base = (char *)tData.ss.ss_sp;
    char * a = (char *)addr;
    return (a >= base) && (a < base + tData.ss.ss_size);
}
// Returns true iff `addr` falls within the first page at the FS or GS
// segment base (thread-control-block area). Bases are fetched lazily once
// per thread via arch_prctl and cached in tData.
bool IsFSorGS(void * addr) {
    if (tData.fs_reg_val == (void *) -1) {
        syscall(SYS_arch_prctl, ARCH_GET_FS, &tData.fs_reg_val);
        syscall(SYS_arch_prctl, ARCH_GET_GS, &tData.gs_reg_val);
    }
    // 4096: smallest page size. char* casts avoid non-standard void*
    // arithmetic/comparison (GNU extension).
    char * fs = (char *)tData.fs_reg_val;
    char * gs = (char *)tData.gs_reg_val;
    char * a = (char *)addr;
    if ((fs <= a) && (a < fs + 4096))
        return true;
    if ((gs <= a) && (a < gs + 4096))
        return true;
    return false;
}
/********* OS SUPPORT ****************/

// Raw syscall wrapper (canonical version lives in perf-util.h); older
// glibc provides no perf_event_open wrapper.
static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags) {
    long fd = syscall(__NR_perf_event_open, hw_event, pid, cpu, group_fd, flags);
    return fd;
}

// Kernel thread id of the calling thread (no glibc wrapper on old systems).
pid_t gettid() {
    return (pid_t) syscall(__NR_gettid);
}
// Start event delivery on the given perf fd.
static inline void EnableWatchpoint(int fd) {
    CHECK(ioctl(fd, PERF_EVENT_IOC_ENABLE, 0));
}

// Stop event delivery on a slot's fd and mark the slot inactive.
static inline void DisableWatchpoint(WatchPointInfo_t *wpi) {
    assert(wpi->fileHandle != -1);
    CHECK(ioctl(wpi->fileHandle, PERF_EVENT_IOC_DISABLE, 0));
    wpi->isActive = false;
}
// Map a perf ring buffer (one header page + one data page) for `fd`.
static void * MAPWPMBuffer(int fd){
    size_t len = 2 * wpConfig.pgsz;
    void * buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (MAP_FAILED == buf) {
        EMSG("Failed to mmap : %s\n", strerror(errno));
        monitor_real_abort();
    }
    return buf;
}

// Release a buffer obtained from MAPWPMBuffer.
static void UNMAPWPMBuffer(void * buf){
    CHECK(munmap(buf, 2 * wpConfig.pgsz));
}
static int OnWatchPoint(int signum, siginfo_t *info, void *context);
__attribute__((constructor))
static void InitConfig(){
tData.fptr = NULL;
volatile int dummyWP[MAX_WP_SLOTS];
wpConfig.isLBREnabled = true;
struct perf_event_attr peLBR = {
.type = PERF_TYPE_BREAKPOINT,
.size = sizeof(struct perf_event_attr),
.bp_type = HW_BREAKPOINT_W,
.bp_len = HW_BREAKPOINT_LEN_1,
.bp_addr = (uintptr_t)&dummyWP[0],
.sample_period = 1,
.precise_ip = 0 /* arbitraty skid */,
.sample_type = 0,
.exclude_user = 0,
.exclude_kernel = 1,
.exclude_hv = 1,
.disabled = 0, /* enabled */
};
int fd = perf_event_open(&peLBR, 0, -1, -1 /*group*/, 0);
if (fd != -1) {
wpConfig.isLBREnabled = true;
} else {
wpConfig.isLBREnabled = false;
}
CHECK(close(fd));
#if defined(FAST_BP_IOC_FLAG)
wpConfig.isWPModifyEnabled = true;
#else
wpConfig.isWPModifyEnabled = false;
#endif
//wpConfig.signalDelivered = SIGTRAP;
//wpConfig.signalDelivered = SIGIO;
//wpConfig.signalDelivered = SIGUSR1;
wpConfig.signalDelivered = SIGRTMIN + 3;
// Setup the signal handler
sigset_t block_mask;
sigfillset(&block_mask);
// Set a signal handler for SIGUSR1
struct sigaction sa1 = {
.sa_sigaction = OnWatchPoint,
.sa_mask = block_mask,
.sa_flags = SA_SIGINFO | SA_RESTART | SA_NODEFER | SA_ONSTACK
};
if(monitor_sigaction(wpConfig.signalDelivered, OnWatchPoint, 0 /*flags*/, &sa1) == -1) {
fprintf(stderr, "Failed to set WHICH_SIG handler: %s\n", strerror(errno));
monitor_real_abort();
}
wpConfig.pgsz = sysconf(_SC_PAGESIZE);
// identify max WP supported by the architecture
volatile int wpHandles[MAX_WP_SLOTS];
int i = 0;
for(; i < MAX_WP_SLOTS; i++){
struct perf_event_attr pe = {
.type = PERF_TYPE_BREAKPOINT,
.size = sizeof(struct perf_event_attr),
.bp_type = HW_BREAKPOINT_W,
.bp_len = HW_BREAKPOINT_LEN_1,
.bp_addr = (uintptr_t)&dummyWP[i],
.sample_period = 1,
.precise_ip = 0 /* arbitraty skid */,
.sample_type = 0,
.exclude_user = 0,
.exclude_kernel = 1,
.exclude_hv = 1,
.disabled = 0, /* enabled */
};
wpHandles[i] = perf_event_open(&pe, 0, -1, -1 /*group*/, 0);
if (wpHandles[i] == -1) {
break;
}
}
if(i == 0) {
fprintf(stderr, "Cannot create a single watch point\n");
monitor_real_abort();
}
for (int j = 0 ; j < i; j ++) {
CHECK(close(wpHandles[j]));
}
wpConfig.maxWP = i;
// Should we get the floating point type in an access?
wpConfig.getFloatType = false;
// Get the replacement scheme
char * replacementScheme = getenv("HPCRUN_WP_REPLACEMENT_SCHEME");
if(replacementScheme){
if(0 == strcasecmp(replacementScheme, "AUTO")) {
wpConfig.replacementPolicy = AUTO;
} else if (0 == strcasecmp(replacementScheme, "OLDEST")) {
wpConfig.replacementPolicy = OLDEST;
} else if (0 == strcasecmp(replacementScheme, "NEWEST")) {
wpConfig.replacementPolicy = NEWEST;
} else {
// default;
wpConfig.replacementPolicy = AUTO;
}
} else {
// default;
wpConfig.replacementPolicy = AUTO;
}
// Should we fix IP off by one?
char * fixIP = getenv("HPCRUN_WP_DONT_FIX_IP");
if(fixIP){
if(0 == strcasecmp(fixIP, "1")) {
wpConfig.dontFixIP = true;
} if (0 == strcasecmp(fixIP, "true")) {
wpConfig.dontFixIP = true;
} else {
// default;
wpConfig.dontFixIP = false;
}
} else {
// default;
wpConfig.dontFixIP = false;
}
// Should we get the address in a WP trigger?
char * disassembleWPAddress = getenv("HPCRUN_WP_DONT_DISASSEMBLE_TRIGGER_ADDRESS");
if(disassembleWPAddress){
if(0 == strcasecmp(disassembleWPAddress, "1")) {
wpConfig.dontDisassembleWPAddress = true;
} if (0 == strcasecmp(disassembleWPAddress, "true")) {
wpConfig.dontDisassembleWPAddress = true;
} else {
// default;
wpConfig.dontDisassembleWPAddress = false;
}
} else {
// default;
wpConfig.dontDisassembleWPAddress = false;
}
}
// Per-client configuration overrides. Each client (RedSpy, LoadSpy, the
// sharing detectors, the reuse detectors) tweaks the global wpConfig after
// InitConfig(). The void* argument is unused.

// RedSpy: needs the FP/int type of the access.
void RedSpyWPConfigOverride(void *v){
    wpConfig.getFloatType = true;
}

// LoadSpy: needs the FP/int type of the access.
void LoadSpyWPConfigOverride(void *v){
    wpConfig.getFloatType = true;
}

void FalseSharingWPConfigOverride(void *v){
    // replacement policy is OLDEST forced.
    wpConfig.replacementPolicy = OLDEST;
}

void TrueSharingWPConfigOverride(void *v){
    // replacement policy is OLDEST forced.
    wpConfig.replacementPolicy = OLDEST;
}

void AllSharingWPConfigOverride(void *v){
    // replacement policy is OLDEST forced.
    wpConfig.replacementPolicy = OLDEST;
}

void IPCFalseSharingWPConfigOverride(void *v){
    // replacement policy is OLDEST forced.
    wpConfig.replacementPolicy = OLDEST;
}

void IPCTrueSharingWPConfigOverride(void *v){
    // replacement policy is OLDEST forced.
    wpConfig.replacementPolicy = OLDEST;
}

void IPCAllSharingWPConfigOverride(void *v){
    // replacement policy is OLDEST forced.
    wpConfig.replacementPolicy = OLDEST;
}

// Reuse clients: trigger IP/address details are not needed.
void TemporalReuseWPConfigOverride(void *v){
    // dont fix IP
    wpConfig.dontFixIP = true;
    wpConfig.dontDisassembleWPAddress = true;
}

void SpatialReuseWPConfigOverride(void *v){
    // dont fix IP
    wpConfig.dontFixIP = true;
    wpConfig.dontDisassembleWPAddress = true;
}
// Program a hardware breakpoint described by `sampleData` into slot `wpi`.
// When `modify` is true and the kernel supports the fast-update ioctl, the
// existing event is changed in place; otherwise a fresh perf event is
// created with async signal delivery to this thread.
static void CreateWatchPoint(WatchPointInfo_t * wpi, SampleData_t * sampleData, bool modify) {
    // Perf event settings
    struct perf_event_attr pe = {
        .type = PERF_TYPE_BREAKPOINT,
        .size = sizeof(struct perf_event_attr),
        .sample_period = 1,
        // With LBR support ask for zero skid; otherwise accept arbitrary skid.
        .precise_ip = wpConfig.isLBREnabled? 2 /*precise_ip 0 skid*/ : 0 /* arbitraty skid */,
        .sample_type = (PERF_SAMPLE_IP),
        .exclude_user = 0,
        .exclude_kernel = 1,
        .exclude_hv = 1,
        .disabled = 0, /* enabled */
    };

    switch (sampleData->wpLength) {
        case 1: pe.bp_len = HW_BREAKPOINT_LEN_1; break;
        case 2: pe.bp_len = HW_BREAKPOINT_LEN_2; break;
        case 4: pe.bp_len = HW_BREAKPOINT_LEN_4; break;
        case 8: pe.bp_len = HW_BREAKPOINT_LEN_8; break;
        default:
            // BUGFIX: report the offending length from sampleData;
            // wpi->sample is not updated until the end of this function,
            // so the original printed a stale/uninitialized value.
            EMSG("Unsupported .bp_len %d: %s\n", sampleData->wpLength, strerror(errno));
            monitor_real_abort();
    }
    pe.bp_addr = (uintptr_t)sampleData->va;

    switch (sampleData->type) {
        case WP_READ: pe.bp_type = HW_BREAKPOINT_R; break;
        case WP_WRITE: pe.bp_type = HW_BREAKPOINT_W; break;
        default: pe.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
    }

#if defined(FAST_BP_IOC_FLAG)
    if(modify) {
        // In-place modification of an existing event.
        assert(wpi->fileHandle != -1);
        assert(wpi->mmapBuffer != 0);
        CHECK(ioctl(wpi->fileHandle, FAST_BP_IOC_FLAG, (unsigned long) (&pe)));
    } else
#endif
    {
        // Fresh creation: perf event for this thread on any CPU, no group.
        int perf_fd = perf_event_open(&pe, 0, -1, -1 /*group*/, 0);
        if (perf_fd == -1) {
            EMSG("Failed to open perf event file: %s\n",strerror(errno));
            monitor_real_abort();
        }
        // Async mode + deliver our signal to exactly this thread.
        CHECK(fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL, 0) | O_ASYNC));
        CHECK(fcntl(perf_fd, F_SETSIG, wpConfig.signalDelivered));
        struct f_owner_ex fown_ex;
        fown_ex.type = F_OWNER_TID;
        fown_ex.pid = gettid();
        int ret = fcntl(perf_fd, F_SETOWN_EX, &fown_ex);
        if (ret == -1){
            EMSG("Failed to set the owner of the perf event file: %s\n", strerror(errno));
            // BUGFIX: do not leak the descriptor on this error path.
            close(perf_fd);
            return;
        }
        wpi->fileHandle = perf_fd;
        // mmap the ring buffer only when LBR (precise IP) is in use.
        if(wpConfig.isLBREnabled) {
            wpi->mmapBuffer = MAPWPMBuffer(perf_fd);
        }
    }

    wpi->isActive = true;
    wpi->va = (void *) pe.bp_addr;
    wpi->sample = *sampleData;
    wpi->startTime = rdtsc();
}
/* create a dummy PERF_TYPE_HARDWARE event that will never fire */
static void CreateDummyHardwareEvent(void) {
// Perf event settings
struct perf_event_attr pe = {
.type = PERF_TYPE_HARDWARE,
.size = sizeof(struct perf_event_attr),
.config = PERF_COUNT_HW_CACHE_MISSES,
.sample_period = 0x7fffffffffffffff, /* some insanely large sample period */
.precise_ip = 2,
.sample_type = PERF_SAMPLE_BRANCH_STACK,
.exclude_user = 0,
.exclude_kernel = 1,
.exclude_hv = 1,
.branch_sample_type = PERF_SAMPLE_BRANCH_ANY,
};
// Create the perf_event for this thread on all CPUs with no event group
int perf_fd = perf_event_open(&pe, 0, -1, -1, 0);
if (perf_fd == -1) {
EMSG("Failed to open perf event file: %s\n", strerror(errno));
monitor_real_abort();
}
tData.lbrDummyFD = perf_fd;
}
static void CloseDummyHardwareEvent(int perf_fd){
CHECK(close(perf_fd));
}
/*********** Client interfaces *******/
// Tear a slot down completely: unmap its ring buffer and close its fd.
static void DisArm(WatchPointInfo_t * wpi){
    assert(wpi->fileHandle != -1);

    if(wpi->mmapBuffer)
        UNMAPWPMBuffer(wpi->mmapBuffer);
    wpi->mmapBuffer = 0;

    CHECK(close(wpi->fileHandle));
    wpi->fileHandle = -1;
    wpi->isActive = false;
}
// (Re)program a slot with a new sample. Prefers the kernel's in-place
// modification path when available; otherwise tears down any active event
// and creates a fresh one. Always returns true.
static bool ArmWatchPoint(WatchPointInfo_t * wpi, SampleData_t * sampleData) {
    // Fast path: modify the existing event in place (active or not --
    // modification also enables it).
    if(wpConfig.isWPModifyEnabled && (wpi->fileHandle != -1)) {
        CreateWatchPoint(wpi, sampleData, true);
        return true;
    }
    // Slow path: disarm the old WP if it is active, then create anew.
    if(wpi->isActive)
        DisArm(wpi);
    CreateWatchPoint(wpi, sampleData, false);
    return true;
}
// Per thread initialization: install an alternate signal stack, reset the
// per-thread watchpoint bookkeeping, seed the reentrant PRNG, and create
// the LBR workaround event if needed.
void WatchpointThreadInit(WatchPointUpCall_t func){
    // Dedicated signal stack: WP signals may arrive while the user stack
    // is in a delicate state.
    tData.ss.ss_sp = malloc(ALT_STACK_SZ);
    if (tData.ss.ss_sp == NULL){
        EMSG("Failed to malloc ALT_STACK_SZ");
        monitor_real_abort();
    }
    tData.ss.ss_size = ALT_STACK_SZ;
    tData.ss.ss_flags = 0;
    if (sigaltstack(&tData.ss, NULL) == -1){
        EMSG("Failed sigaltstack");
        monitor_real_abort();
    }

    tData.lbrDummyFD = -1;
    tData.fptr = func;
    tData.fs_reg_val = (void*)-1;
    tData.gs_reg_val = (void*)-1;
    // BUGFIX: mix the thread id into the seed. Threads initialized within
    // the same second otherwise share a seed and make identical "random"
    // watchpoint-replacement decisions.
    srand48_r(time(NULL) ^ gettid(), &tData.randBuffer);

    tData.numWatchpointTriggers = 0;
    tData.numWatchpointImpreciseIP = 0;
    tData.numWatchpointImpreciseAddressArbitraryLength = 0;
    tData.numWatchpointImpreciseAddress8ByteLength = 0;
    tData.numWatchpointDropped = 0;
    tData.numSampleTriggeringWatchpoints = 0;
    tData.numInsaneIP = 0;

    for (int i=0; i<wpConfig.maxWP; i++) {
        tData.watchPointArray[i].isActive = false;
        tData.watchPointArray[i].fileHandle = -1;
        tData.watchPointArray[i].startTime = 0;
        tData.watchPointArray[i].samplePostFull = SAMPLES_POST_FULL_RESET_VAL;
    }

    //if LBR is supported create a dummy PERF_TYPE_HARDWARE for Linux workaround
    if(wpConfig.isLBREnabled) {
        CreateDummyHardwareEvent();
    }
}
// Per-thread teardown: disarm every slot, retire the LBR dummy event, and
// flush this thread's counters into the global hpcrun statistics.
void WatchpointThreadTerminate(){
    for (int slot = 0; slot < wpConfig.maxWP; slot++) {
        if(tData.watchPointArray[slot].fileHandle != -1)
            DisArm(&tData.watchPointArray[slot]);
    }

    if(tData.lbrDummyFD != -1) {
        CloseDummyHardwareEvent(tData.lbrDummyFD);
        tData.lbrDummyFD = -1;
    }

    // Invalidate the cached segment bases (lazily refetched by IsFSorGS).
    tData.fs_reg_val = (void*)-1;
    tData.gs_reg_val = (void*)-1;

    hpcrun_stats_num_watchpoints_triggered_inc(tData.numWatchpointTriggers);
    hpcrun_stats_num_watchpoints_imprecise_inc(tData.numWatchpointImpreciseIP);
    hpcrun_stats_num_watchpoints_imprecise_address_inc(tData.numWatchpointImpreciseAddressArbitraryLength);
    hpcrun_stats_num_watchpoints_imprecise_address_8_byte_inc(tData.numWatchpointImpreciseAddress8ByteLength);
    hpcrun_stats_num_insane_ip_inc(tData.numInsaneIP);
    hpcrun_stats_num_watchpoints_dropped_inc(tData.numWatchpointDropped);
    hpcrun_stats_num_sample_triggering_watchpoints_inc(tData.numSampleTriggeringWatchpoints);
#if 0
    tData.ss.ss_flags = SS_DISABLE;
    if (sigaltstack(&tData.ss, NULL) == -1){
        EMSG("Failed sigaltstack WatchpointThreadTerminate");
        // no need to abort , just leak the memory
        // monitor_real_abort();
    } else {
        if(tData.ss.ss_sp)
            free(tData.ss.ss_sp);
    }
#endif
}
// Finds a victim slot to set a new WP.
// Returns EMPTY_SLOT when a free slot exists, NON_EMPTY_SLOT when an armed
// slot was chosen for eviction, and NONE_AVAILABLE when the policy declines
// to evict (a candidate is still written to *location so the caller may
// force replacement).
static VictimType GetVictim(int * location, ReplacementPolicy policy){
    // If any WP slot is inactive, prefer it.
    for(int i = 0; i < wpConfig.maxWP; i++){
        if(!tData.watchPointArray[i].isActive) {
            *location = i;
            // Increase samplePostFull for those who survived: long-lived
            // WPs become progressively harder to evict under AUTO.
            for(int rest = 0; rest < wpConfig.maxWP; rest++){
                if (tData.watchPointArray[rest].isActive)
                    tData.watchPointArray[rest].samplePostFull++;
            }
            return EMPTY_SLOT;
        }
    }
    switch (policy) {
        case AUTO:{
            // Visit slots in a random order...
            int slots[MAX_WP_SLOTS];
            for(int i = 0; i < wpConfig.maxWP; i++)
                slots[i] = i;
            for(int i = 0; i < wpConfig.maxWP; i++){
                long int randVal;
                lrand48_r(&tData.randBuffer, &randVal);
                randVal = randVal % wpConfig.maxWP;
                int tmp = slots[i];
                slots[i] = slots[randVal];
                slots[randVal] = tmp;
            }
            // ...and try to evict each with probability 1/(1+samplePostFull).
            for(int i = 0; i < wpConfig.maxWP; i++) {
                int loc = slots[i];
                double probabilityToReplace = 1.0/(1.0 + (double)tData.watchPointArray[loc].samplePostFull);
                double randValue;
                drand48_r(&tData.randBuffer, &randValue);
                tData.watchPointArray[loc].samplePostFull++;
                if(randValue <= probabilityToReplace) {
                    *location = loc;
                    // TODO: Milind: Not sure whether I should increment
                    // samplePostFull of the remaining slots. In Qingsen's
                    // experiments, doing this did not hurt.
                    for(int rest = i+1; rest < wpConfig.maxWP; rest++)
                        tData.watchPointArray[slots[rest]].samplePostFull++;
                    return NON_EMPTY_SLOT;
                }
            }
            // An indication not to replace; clients may still force this slot.
            *location = slots[0] /*random value*/;
            return NONE_AVAILABLE;
        }
        case NEWEST:{
            // Always replace the newest
            int64_t newestTime = 0;
            for(int i = 0; i < wpConfig.maxWP; i++){
                if(newestTime < tData.watchPointArray[i].startTime) {
                    *location = i;
                    newestTime = tData.watchPointArray[i].startTime;
                }
            }
            return NON_EMPTY_SLOT;
        }
        case OLDEST:{
            // Always replace the oldest
            int64_t oldestTime = INT64_MAX;
            for(int i = 0; i < wpConfig.maxWP; i++){
                if(oldestTime > tData.watchPointArray[i].startTime) {
                    *location = i;
                    oldestTime = tData.watchPointArray[i].startTime;
                }
            }
            return NON_EMPTY_SLOT;
        }
        case EMPTY_SLOT_ONLY:
            return NONE_AVAILABLE;
        default:
            return NONE_AVAILABLE;
    }
    // BUGFIX: the original could fall off the end of this non-void
    // function (no value returned). All switch branches return above;
    // keep a defensive return so -Wreturn-type stays clean and the dead
    // `break;` statements after each return are gone.
    return NONE_AVAILABLE;
}
// Read memory barrier for the perf ring buffer: orders the preceding read of
// data_head before subsequent reads of the buffer payload (x86 lfence), as
// required by the perf_event_open(2) mmap protocol.
static inline void
rmb(void) {
    asm volatile("lfence":::"memory");
}
// Discard every pending record in a perf ring buffer by advancing data_tail
// up to data_head. Called after a trigger is fully handled (or on error) so
// that no stale record is read on the next trigger.
// NOTE(review): data, tail and avail_sz are computed but unused below
// (avail_sz only feeds the #if 0 trace) -- kept for debugging.
static void ConsumeAllRingBufferData(void *mbuf) {
    struct perf_event_mmap_page *hdr = (struct perf_event_mmap_page *)mbuf;
    unsigned long tail;
    size_t avail_sz;
    size_t pgmsk = wpConfig.pgsz - 1;
    /*
     * data points to beginning of buffer payload
     */
    void * data = ((void *)hdr) + wpConfig.pgsz;
    /*
     * position of tail within the buffer payload
     */
    tail = hdr->data_tail & pgmsk;
    /*
     * size of what is available
     *
     * data_head, data_tail never wrap around
     */
    avail_sz = hdr->data_head - hdr->data_tail;
    rmb();
#if 0
    if(avail_sz == 0 )
        EMSG("\n avail_sz = %d\n", avail_sz);
    else
        EMSG("\n EEavail_sz = %d\n", avail_sz);
#endif
    // reset tail to head
    hdr->data_tail = hdr->data_head;
}
// Copy sz bytes of pending record data out of a perf mmap ring buffer into
// buf, handling wrap-around at the end of the payload, and advance data_tail
// past the consumed bytes.
// Returns 0 on success, -1 when fewer than sz bytes are pending.
static int ReadMampBuffer(void *mbuf, void *buf, size_t sz) {
    struct perf_event_mmap_page *hdr = (struct perf_event_mmap_page *)mbuf;
    void *data;
    unsigned long tail;
    size_t avail_sz, m, c;
    // payload size is one page and a power of two, so (pgsz - 1) is a mask
    size_t pgmsk = wpConfig.pgsz - 1;
    /*
     * data points to beginning of buffer payload
     */
    data = ((void *)hdr) + wpConfig.pgsz;
    /*
     * position of tail within the buffer payload
     */
    tail = hdr->data_tail & pgmsk;
    /*
     * size of what is available
     *
     * data_head, data_tail never wrap around
     */
    avail_sz = hdr->data_head - hdr->data_tail;
    if (sz > avail_sz) {
        printf("\n sz > avail_sz: sz = %lu, avail_sz = %lu\n", sz, avail_sz);
        rmb();
        return -1;
    }
    /* From perf_event_open() manpage */
    rmb();
    /*
     * sz <= avail_sz, we can satisfy the request
     */
    /*
     * c = size till end of buffer
     *
     * buffer payload size is necessarily
     * a power of two, so we can do:
     */
    c = pgmsk + 1 - tail;
    /*
     * min with requested size
     */
    m = c < sz ? c : sz;
    /* copy beginning */
    memcpy(buf, data + tail, m);
    /*
     * copy wrapped around leftover
     */
    if (sz > m)
        memcpy(buf + m, data, sz - m);
    // publish consumption so the kernel can reuse the space
    hdr->data_tail += sz;
    return 0;
}
// Advance data_tail by sz bytes (clamped to the amount actually pending),
// discarding that much record data without copying it out.
void
SkipBuffer(struct perf_event_mmap_page *hdr, size_t sz){
    // never move tail past head
    if ((hdr->data_tail + sz) > hdr->data_head)
        sz = hdr->data_head - hdr->data_tail;
    rmb();
    hdr->data_tail += sz;
}
// A recovered PC is "sane" iff it is non-NULL, does not lie beyond the
// context PC, and precedes it by at most 15 bytes (the maximum length of a
// single x86 instruction).
static inline bool IsPCSane(void * contextPC, void *possiblePC){
    if (possiblePC == 0) {
        return false;
    }
    if (possiblePC > contextPC) {
        return false;
    }
    return (contextPC - possiblePC) <= 15;
}
// Weight of this WP among the active WPs that monitor the same calling
// context. The sharing-aware computation is compiled out (#if 0); every
// trigger currently counts with full weight 1.0.
double ProportionOfWatchpointAmongOthersSharingTheSameContext(WatchPointInfo_t *wpi){
#if 0
    int share = 0;
    for(int i = 0; i < wpConfig.maxWP; i++) {
        if(tData.watchPointArray[i].isActive && tData.watchPointArray[i].sample.node == wpi->sample.node) {
            share ++;
        }
    }
    assert(share > 0);
    return 1.0/share;
#else
    return 1.0;
#endif
}
// Walk back from contextIP to the start of the previous instruction and
// return that PC; used when the kernel-reported IP is imprecise.
// NOTE(review): excludeList is filled with the *data* addresses (va) of the
// active WPs -- presumably so the decoder avoids touching watched locations;
// confirm against get_previous_instruction's contract.
static inline void * GetPatchedIP(void * contextIP) {
    void * patchedIP;
    void * excludeList[MAX_WP_SLOTS] = {0};
    int numExcludes = 0;
    for(int idx = 0; idx < wpConfig.maxWP; idx++){
        if(tData.watchPointArray[idx].isActive) {
            excludeList[numExcludes]=tData.watchPointArray[idx].va;
            numExcludes++;
        }
    }
    get_previous_instruction(contextIP, &patchedIP, excludeList, numExcludes);
    return patchedIP;
}
// Gather all useful data when a WP triggers.
// Reads the pending record from the WP's perf ring buffer; for a
// PERF_RECORD_SAMPLE it recovers a reliable trigger PC (patching the
// imprecise IP when needed) and, unless disabled, disassembles the access to
// obtain its length/type/address into *wpt.
// Returns true on success; on any failure the ring buffer is drained and
// false is returned so the caller can drop the trigger.
static bool CollectWatchPointTriggerInfo(WatchPointInfo_t * wpi, WatchPointTrigger_t *wpt, void * context){
    //struct perf_event_mmap_page * b = wpi->mmapBuffer;
    struct perf_event_header hdr;
    if (ReadMampBuffer(wpi->mmapBuffer, &hdr, sizeof(struct perf_event_header)) < 0) {
        EMSG("Failed to ReadMampBuffer: %s\n", strerror(errno));
        monitor_real_abort();
    }
    switch(hdr.type) {
        case PERF_RECORD_SAMPLE:
            // NOTE(review): hdr.type is a record type, not a sample_type
            // bitmask; this test "works" only because PERF_RECORD_SAMPLE (9)
            // and PERF_SAMPLE_IP (1) share bit 0 -- confirm the intent
            // against perf_event_open(2).
            assert (hdr.type & PERF_SAMPLE_IP);
            void * contextIP = hpcrun_context_pc(context);
            void * preciseIP = (void *)-1;
            void * patchedIP = (void *)-1;
            void * reliableIP = (void *)-1;
            void * addr = (void *)-1;
            if (hdr.type & PERF_SAMPLE_IP){
                if (ReadMampBuffer(wpi->mmapBuffer, &preciseIP, sizeof(uint64_t)) < 0) {
                    EMSG("Failed to ReadMampBuffer: %s\n", strerror(errno));
                    monitor_real_abort();
                }
                if(! (hdr.misc & PERF_RECORD_MISC_EXACT_IP)){
                    //EMSG("PERF_SAMPLE_IP imprecise\n");
                    tData.numWatchpointImpreciseIP ++;
                    if(wpConfig.dontFixIP == false) {
                        patchedIP = GetPatchedIP(contextIP);
                        if(!IsPCSane(contextIP, patchedIP)) {
                            //EMSG("get_previous_instruction failed \n");
                            tData.numInsaneIP ++;
                            goto ErrExit;
                        }
                        reliableIP = patchedIP;
                    } else {
                        // Fake as requested by Xu for reuse clients
                        reliableIP = contextIP-1;
                    }
                    //EMSG("PERF_SAMPLE_IP imprecise: %p patched to %p in WP handler\n", tmpIP, patchedIP);
                } else {
#if 0 // Precise PC can be far away in jump/call instructions.
                    // Ensure the "precise" PC is within one instruction from context pc
                    if(!IsPCSane(contextIP, preciseIP)) {
                        tData.numInsaneIP ++;
                        //EMSG("get_previous_instruction failed \n");
                        goto ErrExit;
                    }
#endif
                    reliableIP = preciseIP;
                    //if(! ((ip <= tmpIP) && (tmpIP-ip < 20))) ConsumeAllRingBufferData(wpi->mmapBuffer);
                    //assert( (ip <= tmpIP) && (tmpIP-ip < 20));
                }
            } else {
                // Should happen only for wpConfig.isLBREnabled==false
                assert(wpConfig.isLBREnabled==false);
                // Fall back to old scheme of disassembling and capturing the info
                if(wpConfig.dontFixIP == false) {
                    patchedIP = GetPatchedIP(contextIP);
                    if(!IsPCSane(contextIP, patchedIP)) {
                        tData.numInsaneIP ++;
                        //EMSG("PERF_SAMPLE_IP imprecise: %p failed to patch in WP handler, WP dropped\n", tmpIP);
                        goto ErrExit;
                    }
                    reliableIP = patchedIP;
                }else {
                    // Fake as requested by Xu for reuse clients
                    reliableIP = contextIP-1;
                }
            }
            wpt->pc = reliableIP;
            if(wpConfig.dontDisassembleWPAddress == false){
                FloatType * floatType = wpConfig.getFloatType? &wpt->floatType : 0;
                if(false == get_mem_access_length_and_type_address(wpt->pc, (uint32_t*) &(wpt->accessLength), &(wpt->accessType), floatType, context, &addr)){
                    //EMSG("WP triggered on a non Load/Store add = %p\n", wpt->pc);
                    goto ErrExit;
                }
                if (wpt->accessLength == 0) {
                    //EMSG("WP triggered 0 access length! at pc=%p\n", wpt->pc);
                    goto ErrExit;
                }
                void * patchedAddr = (void *)-1;
                // Stack affecting addresses will be off by 8
                // Some instructions affect the address computing register: mov (%rax),%eax
                // Hence, if the addresses do NOT overlap, merely use the Sample address!
                if(false == ADDRESSES_OVERLAP(addr, wpt->accessLength, wpi->va, wpi->sample.wpLength)) {
                    // BUGFIX: numWatchpointImpreciseAddressArbitraryLength was
                    // previously incremented a second time, unconditionally,
                    // after this if/else, double-counting every imprecise
                    // address. Count each imprecise trigger exactly once.
                    if ((wpt->accessLength == sizeof(void *)) && (wpt->accessLength == wpi->sample.wpLength) && (((addr - wpi->va) == sizeof(void *)) || ((wpi->va - addr) == sizeof(void *))))
                        tData.numWatchpointImpreciseAddress8ByteLength ++;
                    else
                        tData.numWatchpointImpreciseAddressArbitraryLength ++;
                    patchedAddr = wpi->va;
                } else {
                    patchedAddr = addr;
                }
                wpt->va = patchedAddr;
            } else {
                wpt->va = (void *)-1;
            }
            wpt->ctxt = context;
            // We must cleanup the mmap buffer if there is any data left
            ConsumeAllRingBufferData(wpi->mmapBuffer);
            return true;
        case PERF_RECORD_EXIT:
            EMSG("PERF_RECORD_EXIT sample type %d sz=%d\n", hdr.type, hdr.size);
            //SkipBuffer(wpi->mmapBuffer , hdr.size - sizeof(hdr));
            goto ErrExit;
        case PERF_RECORD_LOST:
            EMSG("PERF_RECORD_LOST sample type %d sz=%d\n", hdr.type, hdr.size);
            //SkipBuffer(wpi->mmapBuffer , hdr.size - sizeof(hdr));
            goto ErrExit;
        case PERF_RECORD_THROTTLE:
            EMSG("PERF_RECORD_THROTTLE sample type %d sz=%d\n", hdr.type, hdr.size);
            //SkipBuffer(wpi->mmapBuffer , hdr.size - sizeof(hdr));
            goto ErrExit;
        case PERF_RECORD_UNTHROTTLE:
            EMSG("PERF_RECORD_UNTHROTTLE sample type %d sz=%d\n", hdr.type, hdr.size);
            //SkipBuffer(wpi->mmapBuffer , hdr.size - sizeof(hdr));
            goto ErrExit;
        default:
            EMSG("unknown sample type %d sz=%d\n", hdr.type, hdr.size);
            //SkipBuffer(wpi->mmapBuffer , hdr.size - sizeof(hdr));
            goto ErrExit;
    }
ErrExit:
    // We must cleanup the mmap buffer if there is any data left
    ConsumeAllRingBufferData(wpi->mmapBuffer);
    return false;
}
// Turn off a watchpoint using whichever mechanism the configuration allows:
// the perf-modify path when enabled, otherwise a full disarm.
void DisableWatchpointWrapper(WatchPointInfo_t *wpi){
    if (!wpConfig.isWPModifyEnabled) {
        DisArm(wpi);
        return;
    }
    DisableWatchpoint(wpi);
}
// Signal handler invoked when a hardware watchpoint fires.
// Pauses perf sampling, identifies which armed WP triggered (by matching
// si_fd against the WP file handles), runs the pre-WP action, collects
// trigger details, invokes the client callback (tData.fptr), and applies
// whatever action the callback requests. Always returns 0; triggers that
// match no active WP are dropped.
static int OnWatchPoint(int signum, siginfo_t *info, void *context){
    //volatile int x;
    //fprintf(stderr, "OnWatchPoint=%p\n", &x);
    // Disable HPCRUN sampling
    // if the trap is already in hpcrun, return
    // If the interrupt came from inside our code, then drop the sample
    // and return and avoid any MSG.
    void* pc = hpcrun_context_pc(context);
    if (!hpcrun_safe_enter_async(pc)) return 0;
    linux_perf_events_pause();
    tData.numWatchpointTriggers++;
    //fprintf(stderr, " numWatchpointTriggers = %lu, \n", tData.numWatchpointTriggers);
    //find which watchpoint fired
    int location = -1;
    for(int i = 0 ; i < wpConfig.maxWP; i++) {
        if((tData.watchPointArray[i].isActive) && (info->si_fd == tData.watchPointArray[i].fileHandle)) {
            location = i;
            break;
        }
    }
    // Ensure it is an active WP
    if(location == -1) {
        EMSG("\n WP trigger did not match any known active WP\n");
        //monitor_real_abort();
        hpcrun_safe_exit();
        linux_perf_events_resume();
        //fprintf("\n WP trigger did not match any known active WP\n");
        return 0;
    }
    WatchPointTrigger_t wpt;
    WPTriggerActionType retVal;
    WatchPointInfo_t *wpi = &tData.watchPointArray[location];
    // Perform Pre watchpoint action
    switch (wpi->sample.preWPAction) {
        case DISABLE_WP:
            DisableWatchpointWrapper(wpi);
            break;
        case DISABLE_ALL_WP:
            for(int i = 0; i < wpConfig.maxWP; i++) {
                if(tData.watchPointArray[i].isActive){
                    DisableWatchpointWrapper(&tData.watchPointArray[i]);
                }
            }
            break;
        default:
            // unknown pre-WP action: fail loudly
            assert(0 && "NYI");
            monitor_real_abort();
            break;
    }
    if( false == CollectWatchPointTriggerInfo(wpi, &wpt, context)) {
        tData.numWatchpointDropped++;
        retVal = DISABLE_WP; // disable if unable to collect any info.
    } else {
        // NOTE(review): the second and third callback arguments are marked
        // invalid/placeholder -- confirm the tData.fptr contract.
        retVal = tData.fptr(wpi, 0, wpt.accessLength/* invalid*/, &wpt);
    }
    // Let the client take action.
    switch (retVal) {
        case DISABLE_WP: {
            if(wpi->isActive){
                DisableWatchpointWrapper(wpi);
            }
            // Reset per WP probability
            wpi->samplePostFull = SAMPLES_POST_FULL_RESET_VAL;
        }
        break;
        case DISABLE_ALL_WP: {
            for(int i = 0; i < wpConfig.maxWP; i++) {
                if(tData.watchPointArray[i].isActive){
                    DisableWatchpointWrapper(&tData.watchPointArray[i]);
                }
                // Reset per WP probability
                tData.watchPointArray[i].samplePostFull = SAMPLES_POST_FULL_RESET_VAL;
            }
        }
        break;
        case ALREADY_DISABLED: { // Already disabled, perhaps in pre-WP action
            assert(wpi->isActive == false);
            // Reset per WP probability
            wpi->samplePostFull = SAMPLES_POST_FULL_RESET_VAL;
        }
        break;
        case RETAIN_WP: { // resurrect this wp
            if(!wpi->isActive){
                EnableWatchpoint(wpi->fileHandle);
                wpi->isActive = true;
            }
        }
        break;
        default: // Retain the state
            break;
    }
    // hpcrun_all_sources_start();
    linux_perf_events_resume();
    hpcrun_safe_exit();
    return 0;
}
// Validate that a sample can be armed as an x86 hardware watchpoint:
// the length must be 1, 2, 4 or 8 and the address aligned to that length.
// Returns true when the (address, length) pair is usable; aborts on a
// zero or unsupported length.
static bool ValidateWPData(SampleData_t * sampleData){
    // Check alignment
#if defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(__amd64)
    switch (sampleData->wpLength) {
        case 0:
            EMSG("\nValidateWPData: 0 length WP never allowed");
            monitor_real_abort();
            // monitor_real_abort() does not return
        case 1:
        case 2:
        case 4:
        case 8:
            // x86 debug registers require the address be aligned to the length
            return IS_ALIGNED(sampleData->va, sampleData->wpLength) ? true : false;
        default:
            EMSG("Unsuppported WP length %d", sampleData->wpLength);
            monitor_real_abort();
            return false; // unsupported length
    }
#else
#error "unknown architecture"
#endif
}
// Returns true iff sampleData's watched byte range overlaps the range of any
// currently active WP. (The name's "Overalpped" typo is kept: it is part of
// this file's interface.)
static bool IsOveralpped(SampleData_t * sampleData){
    // Is a WP with the same/overlapping address active?
    for (int i = 0; i < wpConfig.maxWP; i++) {
        if(tData.watchPointArray[i].isActive){
            if(ADDRESSES_OVERLAP(tData.watchPointArray[i].sample.va, tData.watchPointArray[i].sample.wpLength, sampleData->va, sampleData->wpLength)){
                return true;
            }
        }
    }
    return false;
}
// Snapshot the current contents of the sampled address into wpi->value so a
// later trigger can be compared against it. Lengths other than 2/4/8 are
// captured as a single byte.
void CaptureValue(SampleData_t * sampleData, WatchPointInfo_t * wpi){
    void * dst = & (wpi->value[0]);
    switch(sampleData->wpLength) {
        case 8: *((uint64_t*)dst) = *(uint64_t*)(sampleData->va); break;
        case 4: *((uint32_t*)dst) = *(uint32_t*)(sampleData->va); break;
        case 2: *((uint16_t*)dst) = *(uint16_t*)(sampleData->va); break;
        default: // force 1 length
        case 1: *((uint8_t*)dst) = *(uint8_t*)(sampleData->va); break;
    }
}
// Install a watchpoint for the sampled address, choosing a victim slot via
// wpConfig.replacementPolicy. Returns true when a WP was armed, false when
// the sample was dropped (invalid/unaligned length, overlap with an active
// WP, or the policy yielded no victim).
// NOTE(review): the overwritePolicy parameter is currently unused -- confirm
// whether it was meant to influence GetVictim()/eviction.
bool SubscribeWatchpoint(SampleData_t * sampleData, OverwritePolicy overwritePolicy, bool captureValue){
    if(ValidateWPData(sampleData) == false) {
        return false;
    }
    if(IsOveralpped(sampleData)){
        return false; // drop the sample if it overlaps an existing address
    }
    // No overlap, look for a victim slot
    int victimLocation = -1;
    // Find a slot to install WP
    VictimType r = GetVictim(&victimLocation, wpConfig.replacementPolicy);
    if(r != NONE_AVAILABLE) {
        // VV IMP: Capture value before arming the WP.
        if(captureValue)
            CaptureValue(sampleData, &tData.watchPointArray[victimLocation]);
        // I know the error case that we have captured the value but ArmWatchPoint fails.
        // I am not handling that corner case because ArmWatchPoint() will fail with a monitor_real_abort().
        if(ArmWatchPoint(&tData.watchPointArray[victimLocation], sampleData) == false){
            //LOG to hpcrun log
            EMSG("ArmWatchPoint failed for address %p", sampleData->va);
            return false;
        }
        return true;
    }
    return false;
}
#ifdef TEST
#include<omp.h>
__thread volatile int cnt;
// Test callback installed as tData.fptr: counts triggers and disarms the WP.
// NOTE(review): this two-argument signature does not match how OnWatchPoint
// invokes tData.fptr (four arguments), and DISABLE is not among the
// WPTriggerActionType values used there (DISABLE_WP etc.) -- this test code
// appears stale relative to the current API; confirm before relying on it.
WPUpCallTRetType Test1UpCall(WatchPointInfo_t * wp, WatchPointTrigger_t * wt) {
    printf("\n Test1UpCall %p\n", wt->va);
    if(wpConfig.isLBREnabled)
        assert(wp->sample.va == wt->va);
    cnt ++;
    return DISABLE;
}
void TestBasic(){
tData.fptr = Test1UpCall;
sigset_t block_mask;
sigemptyset (&block_mask);
// Set a signal handler for SIGUSR1
struct sigaction sa1 = {
.sa_sigaction = OnWatchPoint,
// .sa_mask = block_mask,
.sa_flags = SA_SIGINFO | SA_RESTART | SA_NODEFER
};
if(sigaction(wpConfig.signalDelivered, &sa1, NULL) == -1) {
fprintf(stderr, "Failed to set WHICH_SIG handler: %s\n", strerror(errno));
monitor_real_abort();
}
WatchpointThreadInit();
int N = 10000;
volatile int dummyWPLocation[10000];
cnt = 0;
for(int i = 0 ; i < N; i++) {
SampleData_t s = {.va = &dummyWPLocation[i], .wpLength = sizeof(int), .type = WP_WRITE};
SubscribeWatchpoint(&s, AUTO);
}
for(int i = 0 ; i < N; i++) {
dummyWPLocation[i]++;
}
printf("\n cnt = %d\n", cnt);
assert(cnt == wpConfig.maxWP);
WatchpointThreadTerminate();
}
// Test driver: repeatedly runs TestBasic from every OpenMP thread.
// Note: the while(1) loop never exits; the final return is unreachable.
int main() {
    printf("\n Test 1: single threaded");
    while(1) {
#pragma omp parallel
        {
            TestBasic();
        }
    }
    return 0;
}
#endif
|
GB_unaryop__ainv_int32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int32_int16
// op(A') function: GB_tran__ainv_int32_int16
// C type: int32_t
// A type: int16_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Computes Cx [p] = (int32_t) (-Ax [p]) for all p in [0, anz), in parallel.
// (Auto-generated kernel; logic lives in the GB_* macros defined above.)
GrB_Info GB_unop__ainv_int32_int16
(
    int32_t *restrict Cx,       // output array, length anz
    const int16_t *restrict Ax, // input array, length anz
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -((int32_t) A'): transpose, typecast, and apply the AINV operator.
// All work is delegated to the generic GB_unaryop_transpose.c template
// (phase 2; Rowcounts/A_slice are presumably phase-1 results -- see the
// template for their exact contract).
GrB_Info GB_tran__ainv_int32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
pr66633-1.c | /* PR middle-end/66633 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -O1" } */
void baz (int (*) ());
void
foo (void)
{
  int i;
  /* GNU C nested function ('auto' declaration); bar captures i from foo.  */
  auto int bar (void) { return i; }
  /* PR middle-end/66633: passing the nested function's address into an
     OpenMP parallel region must compile without an ICE.  Do not alter the
     code of this regression test.  */
#pragma omp parallel
    baz (bar);
}
|
LAGraph_BF_full.c | //------------------------------------------------------------------------------
// LAGraph_BF_full.c: Bellman-Ford single-source shortest paths, returns tree
//------------------------------------------------------------------------------
// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
//
// See additional acknowledgments in the LICENSE file,
// or contact permission@sei.cmu.edu for the full terms.
//------------------------------------------------------------------------------
// LAGraph_BF_full: Bellman-Ford single source shortest paths, returning both
// the path lengths and the shortest-path tree. contributed by Jinhao Chen and
// Tim Davis, Texas A&M.
// LAGraph_BF_full performs a Bellman-Ford to find out shortest path, parent
// nodes along the path and the hops (number of edges) in the path from given
// source vertex s in the range of [0, n) on graph given as matrix A with size
// n*n. The sparse matrix A has entry A(i, j) if there is an edge from vertex i
// to vertex j with weight w, then A(i, j) = w. Furthermore, LAGraph_BF_full
// requires A(i, i) = 0 for all 0 <= i < n.
// LAGraph_BF_full returns GrB_SUCCESS if successful, and GrB_NO_VALUE if it
// detects the existence of negative- weight cycle. The GrB_Vector d(k), pi(k)
// and h(k) (i.e., *pd_output, *ppi_output and *ph_output respectively) will
// be NULL when negative-weight cycle detected. Otherwise, the vector d has
// d(k) as the shortest distance from s to k. pi(k) = p+1, where p is the
// parent node of k-th node in the shortest path. In particular, pi(s) = 0.
// h(k) = hop(s, k), the number of edges from s to k in the shortest path.
//------------------------------------------------------------------------------
#define LAGraph_FREE_ALL \
{ \
GrB_free(&d); \
GrB_free(&dtmp); \
GrB_free(&Atmp); \
GrB_free(&BF_Tuple3); \
GrB_free(&BF_lMIN_Tuple3); \
GrB_free(&BF_PLUSrhs_Tuple3); \
GrB_free(&BF_EQ_Tuple3); \
GrB_free(&BF_lMIN_Tuple3_Monoid); \
GrB_free(&BF_lMIN_PLUSrhs_Tuple3); \
LAGraph_Free ((void**)&I); \
LAGraph_Free ((void**)&J); \
LAGraph_Free ((void**)&w); \
LAGraph_Free ((void**)&W); \
LAGraph_Free ((void**)&h); \
LAGraph_Free ((void**)&pi); \
}
#include <LAGraph.h>
#include <LAGraphX.h>
#include <LG_internal.h> // from src/utility
typedef void (*LAGraph_binary_function) (void *, const void *, const void *) ;
//------------------------------------------------------------------------------
// data type for each entry of the adjacent matrix A and "distance" vector d;
// <INFINITY,INFINITY,INFINITY> corresponds to nonexistence of a path, and
// the value <0, 0, NULL> corresponds to a path from a vertex to itself
//------------------------------------------------------------------------------
// Combined (weight, hops, parent) record propagated through the BF semiring;
// one such tuple per matrix entry and per distance-vector entry.
typedef struct
{
    double w;    // w corresponds to a path weight.
    GrB_Index h; // h corresponds to a path size or number of hops.
    GrB_Index pi;// pi corresponds to the penultimate vertex along a path.
    // vertex indexed as 1, 2, 3, ... , V, and pi = 0 (as nil)
    // for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E
}
BF_Tuple3_struct;
//------------------------------------------------------------------------------
// 2 binary functions, z=f(x,y), where Tuple3xTuple3 -> Tuple3
//------------------------------------------------------------------------------
// z = min(x, y) under the lexicographic order on (w, h, pi): prefer the
// smaller weight, break ties by fewer hops, then by the smaller parent id.
void BF_lMIN
(
    BF_Tuple3_struct *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    int x_is_smaller = 0;
    if (x->w < y->w)
    {
        x_is_smaller = 1;
    }
    else if (x->w == y->w)
    {
        if (x->h < y->h) x_is_smaller = 1;
        else if (x->h == y->h && x->pi < y->pi) x_is_smaller = 1;
    }
    if (x_is_smaller)
    {
        // avoid the self-copy when z aliases x
        if (z != x) { *z = *x; }
    }
    else
    {
        *z = *y;
    }
}
// z = x "+" y along a path: weights and hop counts add; the parent comes
// from the right operand when the left path exists and the right has a
// parent, otherwise it is carried over from the left.
void BF_PLUSrhs
(
    BF_Tuple3_struct *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    z->w = x->w + y->w ;
    z->h = x->h + y->h ;
    if (x->pi != UINT64_MAX && y->pi != 0)
    {
        z->pi = y->pi ;
    }
    else
    {
        z->pi = x->pi ;
    }
}
// *z = true iff the two tuples agree on all three components.
void BF_EQ
(
    bool *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    bool equal = (x->w == y->w) ;
    equal = equal && (x->h == y->h) ;
    equal = equal && (x->pi == y->pi) ;
    (*z) = equal ;
}
// Given a n-by-n adjacency matrix A and a source vertex s.
// If there is no negative-weight cycle reachable from s, return the distances
// of shortest paths from s and parents along the paths as vector d. Otherwise,
// returns d=NULL if there is a negtive-weight cycle.
// pd_output is pointer to a GrB_Vector, where the i-th entry is d(s,i), the
// sum of edges length in the shortest path
// ppi_output is pointer to a GrB_Vector, where the i-th entry is pi(i), the
// parent of i-th vertex in the shortest path
// ph_output is pointer to a GrB_Vector, where the i-th entry is h(s,i), the
// number of edges from s to i in the shortest path
// A has zeros on diagonal and weights on corresponding entries of edges
// s is given index for source vertex
// Bellman-Ford SSSP over a user-defined (w, h, pi) tuple semiring.
// Builds the tuple type/operators/semiring, converts A, iterates vxm until
// the distance vector converges (or V-1 rounds), detects negative cycles,
// and unpacks d into the three output vectors.
GrB_Info LAGraph_BF_full
(
    GrB_Vector *pd_output,  //the pointer to the vector of distance
    GrB_Vector *ppi_output, //the pointer to the vector of parent
    GrB_Vector *ph_output,  //the pointer to the vector of hops
    const GrB_Matrix A,     //matrix for the graph
    const GrB_Index s       //given index of the source
)
{
    GrB_Info info;
    char *msg = NULL ;
    // tmp vector to store distance vector after n (i.e., V) loops
    GrB_Vector d = NULL, dtmp = NULL;
    GrB_Matrix Atmp = NULL;
    GrB_Type BF_Tuple3;
    GrB_BinaryOp BF_lMIN_Tuple3;
    GrB_BinaryOp BF_PLUSrhs_Tuple3;
    GrB_BinaryOp BF_EQ_Tuple3;
    GrB_Monoid BF_lMIN_Tuple3_Monoid;
    GrB_Semiring BF_lMIN_PLUSrhs_Tuple3;
    GrB_Index nrows, ncols, n, nz;  // n = # of row/col, nz = # of nnz in graph
    GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A
    GrB_Index *h = NULL, *pi = NULL;
    double *w = NULL;
    BF_Tuple3_struct *W = NULL;
    LG_CHECK (A == NULL || pd_output == NULL ||
        ppi_output == NULL || ph_output == NULL, -1001, "inputs are NULL") ;
    *pd_output = NULL;
    *ppi_output = NULL;
    *ph_output = NULL;
    GrB_TRY (GrB_Matrix_nrows (&nrows, A)) ;
    GrB_TRY (GrB_Matrix_ncols (&ncols, A)) ;
    GrB_TRY (GrB_Matrix_nvals (&nz, A));
    LG_CHECK (nrows != ncols, -1002, "A must be square") ;
    n = nrows;
    // NOTE(review): s < 0 is always false since GrB_Index is unsigned;
    // only the s >= n test is effective here.
    LG_CHECK (s >= n || s < 0, -1003, "invalid source node") ;
    //--------------------------------------------------------------------------
    // create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring
    //--------------------------------------------------------------------------
    // GrB_Type
    GrB_TRY (GrB_Type_new(&BF_Tuple3, sizeof(BF_Tuple3_struct)));
    // GrB_BinaryOp
    GrB_TRY (GrB_BinaryOp_new(&BF_EQ_Tuple3,
        (LAGraph_binary_function) (&BF_EQ), GrB_BOOL, BF_Tuple3, BF_Tuple3));
    GrB_TRY (GrB_BinaryOp_new(&BF_lMIN_Tuple3,
        (LAGraph_binary_function) (&BF_lMIN), BF_Tuple3, BF_Tuple3, BF_Tuple3));
    GrB_TRY (GrB_BinaryOp_new(&BF_PLUSrhs_Tuple3,
        (LAGraph_binary_function)(&BF_PLUSrhs),
        BF_Tuple3, BF_Tuple3, BF_Tuple3));
    // GrB_Monoid
    // identity = <inf, inf, inf>: "no path"
    BF_Tuple3_struct BF_identity = (BF_Tuple3_struct) { .w = INFINITY,
        .h = UINT64_MAX, .pi = UINT64_MAX };
    GrB_TRY (GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3,
        &BF_identity));
    //GrB_Semiring
    GrB_TRY (GrB_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3,
        BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3));
    //--------------------------------------------------------------------------
    // allocate arrays used for tuplets
    //--------------------------------------------------------------------------
    I = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
    J = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
    w = LAGraph_Malloc (nz, sizeof(double)) ;
    W = LAGraph_Malloc (nz, sizeof(BF_Tuple3_struct)) ;
    LG_CHECK (I == NULL || J == NULL || w == NULL || W == NULL,
        -1004, "out of memory") ;
    //--------------------------------------------------------------------------
    // create matrix Atmp based on A, while its entries become BF_Tuple3 type
    //--------------------------------------------------------------------------
    GrB_TRY (GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A));
    int nthreads;
    LAGRAPH_OK (LAGraph_GetNumThreads (&nthreads, NULL)) ;
    // NOTE(review): debug print left enabled -- consider removing for
    // production use.
    printf ("nthreads %d\n", nthreads) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (GrB_Index k = 0; k < nz; k++)
    {
        if (w[k] == 0) //diagonal entries
        {
            W[k] = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
        }
        else
        {
            // parent is stored 1-based (I[k] + 1); 0 means "nil"
            W[k] = (BF_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 };
        }
    }
    GrB_TRY (GrB_Matrix_new(&Atmp, BF_Tuple3, n, n));
    GrB_TRY (GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3));
    //--------------------------------------------------------------------------
    // create and initialize "distance" vector d
    //--------------------------------------------------------------------------
    GrB_TRY (GrB_Vector_new(&d, BF_Tuple3, n));
    // initial distance from s to itself
    BF_Tuple3_struct d0 = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
    GrB_TRY (GrB_Vector_setElement_UDT(d, &d0, s));
    //--------------------------------------------------------------------------
    // start the Bellman Ford process
    //--------------------------------------------------------------------------
    // copy d to dtmp in order to create a same size of vector
    GrB_TRY (GrB_Vector_dup(&dtmp, d));
    bool same= false;     // variable indicating if d == dtmp
    int64_t iter = 0;     // number of iterations
    // terminate when no new path is found or more than V-1 loops
    while (!same && iter < n - 1)
    {
        // execute semiring on d and A, and save the result to dtmp
        GrB_TRY (GrB_vxm(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3,
            d, Atmp, GrB_NULL));
        LAGRAPH_OK (LAGraph_Vector_IsEqual_op(&same, dtmp, d, BF_EQ_Tuple3, NULL));
        if (!same)
        {
            // swap d and dtmp instead of copying
            GrB_Vector ttmp = dtmp;
            dtmp = d;
            d = ttmp;
        }
        iter ++;
    }
    // check for negative-weight cycle only when there was a new path in the
    // last loop, otherwise, there can't be a negative-weight cycle.
    if (!same)
    {
        // execute semiring again to check for negative-weight cycle
        GrB_TRY (GrB_vxm(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3,
            d, Atmp, GrB_NULL));
        // if d != dtmp, then there is a negative-weight cycle in the graph
        LAGRAPH_OK (LAGraph_Vector_IsEqual_op(&same, dtmp, d, BF_EQ_Tuple3, NULL));
        if (!same)
        {
            // printf("A negative-weight cycle found. \n");
            LAGraph_FREE_ALL;
            return (GrB_NO_VALUE) ;
        }
    }
    //--------------------------------------------------------------------------
    // extract tuple from "distance" vector d and create GrB_Vectors for output
    //--------------------------------------------------------------------------
    // NOTE(review): I/w are reused here and were sized by A's nvals; this
    // assumes d never holds more entries than that -- TODO confirm.
    GrB_TRY (GrB_Vector_extractTuples_UDT (I, (void *) W, &nz, d));
    h  = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
    pi = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
    LG_CHECK (w == NULL || h == NULL || pi == NULL, -1004, "out of memory") ;
    for (GrB_Index k = 0; k < nz; k++)
    {
        w [k] = W[k].w ;
        h [k] = W[k].h ;
        pi[k] = W[k].pi;
    }
    GrB_TRY (GrB_Vector_new(pd_output,  GrB_FP64,   n));
    GrB_TRY (GrB_Vector_new(ppi_output, GrB_UINT64, n));
    GrB_TRY (GrB_Vector_new(ph_output,  GrB_UINT64, n));
    GrB_TRY (GrB_Vector_build_FP64  (*pd_output , I, w , nz,GrB_MIN_FP64  ));
    GrB_TRY (GrB_Vector_build_UINT64(*ppi_output, I, pi, nz,GrB_MIN_UINT64));
    GrB_TRY (GrB_Vector_build_UINT64(*ph_output , I, h , nz,GrB_MIN_UINT64));
    LAGraph_FREE_ALL;
    return (GrB_SUCCESS) ;
}
|
common.h | #ifndef DEPTH_SEGMENTATION_COMMON_H_
#define DEPTH_SEGMENTATION_COMMON_H_
#ifdef _OPENMP
#include <omp.h>
#endif
#include <cmath>
#include <set>
#include <string>
#include <vector>
#include <glog/logging.h>
#include <opencv2/highgui.hpp>
#include <opencv2/rgbd.hpp>
#include <opencv2/viz/vizcore.hpp>
namespace depth_segmentation {
// A segmented surface patch: per-point geometry plus the label sets assigned
// during (semantic/instance) segmentation.
struct Segment {
  std::vector<cv::Vec3f> points;           // 3D coordinates of member pixels
  std::vector<cv::Vec3f> normals;          // per-point surface normals
  std::vector<cv::Vec3f> original_colors;  // per-point colors from the input
  std::set<size_t> label;           // geometric segment label(s)
  std::set<size_t> instance_label;  // overlapping instance ids
  std::set<size_t> semantic_label;  // overlapping semantic class ids
};
const static std::string kDebugWindowName = "DebugImages";
constexpr bool kUseTracker = false;
// Surface-normal estimation method. The first three map directly onto the
// OpenCV cv::rgbd::RgbdNormals methods; kDepthWindowFilter selects this
// package's own window-filter implementation.
// NOTE(review): kDepthWindowFilter is hard-pinned to 3 -- assumes it does
// not collide with the cv::rgbd enumerator values above; verify.
enum class SurfaceNormalEstimationMethod {
  kFals = cv::rgbd::RgbdNormals::RGBD_NORMALS_METHOD_FALS,
  kLinemod = cv::rgbd::RgbdNormals::RGBD_NORMALS_METHOD_LINEMOD,
  kSri = cv::rgbd::RgbdNormals::RGBD_NORMALS_METHOD_SRI,
  kDepthWindowFilter = 3,
};
// Parameters for surface-normal estimation.
struct SurfaceNormalParams {
  SurfaceNormalParams() {
    // The window must be odd (centered) and larger than one pixel.
    CHECK_EQ(window_size % 2u, 1u);
    CHECK_GT(window_size, 1u);
    // The OpenCV-backed methods only accept windows smaller than 8.
    if (method != SurfaceNormalEstimationMethod::kDepthWindowFilter) {
      CHECK_LT(window_size, 8u);
    }
  }
  size_t window_size = 13u;  // odd neighborhood size, in pixels
  SurfaceNormalEstimationMethod method =
      SurfaceNormalEstimationMethod::kDepthWindowFilter;
  bool display = false;  // debug visualization flag
  double distance_factor_threshold = 0.05;
};
// Parameters for the max-distance (depth discontinuity by distance) map.
struct MaxDistanceMapParams {
  MaxDistanceMapParams() { CHECK_EQ(window_size % 2u, 1u); }  // window must be odd
  bool use_max_distance = true;
  size_t window_size = 1u;
  bool display = false;  // debug visualization flag
  bool exclude_nan_as_max_distance = false;
  bool ignore_nan_coordinates = false;  // TODO(ff): This probably doesn't make
                                        // a lot of sense -> consider removing
                                        // it.
  bool use_threshold = true;
  double noise_thresholding_factor = 10.0;
  // Quadratic sensor-noise model coefficients (Nguyen et al., 2012).
  double sensor_noise_param_1st_order = 0.0012;  // From Nguyen et al. (2012)
  double sensor_noise_param_2nd_order = 0.0019;  // From Nguyen et al. (2012)
  double sensor_noise_param_3rd_order = 0.0001;  // From Nguyen et al. (2012)
  double sensor_min_distance = 0.02;
};
// Parameters for the depth-discontinuity edge map.
struct DepthDiscontinuityMapParams {
  DepthDiscontinuityMapParams() { CHECK_EQ(kernel_size % 2u, 1u); }  // kernel must be odd
  bool use_discontinuity = true;
  size_t kernel_size = 3u;
  double discontinuity_ratio = 0.01;
  bool display = false;  // debug visualization flag
};
// Parameters for the minimum-convexity map.
struct MinConvexityMapParams {
  MinConvexityMapParams() { CHECK_EQ(window_size % 2u, 1u); }  // window must be odd
  bool use_min_convexity = true;
  size_t morphological_opening_size = 1u;
  size_t window_size = 5u;
  size_t step_size = 1u;
  bool display = false;  // debug visualization flag
  bool use_morphological_opening = true;
  bool use_threshold = true;
  double threshold = 0.97;
  double mask_threshold = -0.0005;
};
// Parameters for composing the final edge map from the intermediate maps.
struct FinalEdgeMapParams {
  size_t morphological_opening_size = 1u;
  size_t morphological_closing_size = 1u;
  bool use_morphological_opening = true;
  bool use_morphological_closing = true;
  bool display = false;  // debug visualization flag
};
// Strategy used to turn the edge map into a label map.
enum class LabelMapMethod {
  kFloodFill = 0,
  kContour = 1,
};
// Parameters for label-map generation.
struct LabelMapParams {
  LabelMapMethod method = LabelMapMethod::kContour;
  size_t min_size = 500u;  // segments smaller than this (pixels) are discarded
  bool use_inpaint = false;
  size_t inpaint_method = 0u;
  bool display = true;  // debug visualization flag
};
// Parameters controlling the optional semantic-instance segmentation stage.
struct SemanticInstanceSegmentationParams {
  bool enable = false;
  float overlap_threshold = 0.8f;  // minimum mask/segment overlap fraction
};
// Unary predicate: true iff the (floating-point) value is NaN.
struct IsNan {
  template <class T>
  bool operator()(T const& value) const {
    return std::isnan(value);
  }
};
// Unary predicate: true iff the (floating-point) value is not NaN.
struct IsNotNan {
  template <class T>
  bool operator()(T const& value) const {
    return !std::isnan(value);
  }
};
// Aggregates every tunable parameter of the depth-segmentation pipeline.
struct Params {
  bool dilate_depth_image = false;
  size_t dilation_size = 1u;
  FinalEdgeMapParams final_edge;
  LabelMapParams label;
  DepthDiscontinuityMapParams depth_discontinuity;
  MaxDistanceMapParams max_distance;
  MinConvexityMapParams min_convexity;
  SurfaceNormalParams normals;
  SemanticInstanceSegmentationParams semantic_instance_segmentation;
  bool visualize_segmented_scene = false;
};
// Renders a CV_32FC3 point map as a red point cloud in the given viz window.
// Non-blocking: spinOnce() redraws once and returns.
void visualizeDepthMap(const cv::Mat& depth_map, cv::viz::Viz3d* viz_3d) {
  CHECK(!depth_map.empty());
  CHECK_EQ(depth_map.type(), CV_32FC3);
  CHECK_NOTNULL(viz_3d);
  viz_3d->setBackgroundColor(cv::viz::Color::gray());
  viz_3d->showWidget("cloud",
                     cv::viz::WCloud(depth_map, cv::viz::Color::red()));
  viz_3d->showWidget("coo", cv::viz::WCoordinateSystem(1.5));
  viz_3d->spinOnce(0, true);
}

// Same as visualizeDepthMap(), additionally drawing every 50th normal as a
// short green segment. depth_map and normals must have identical size and be
// CV_32FC3.
void visualizeDepthMapWithNormals(const cv::Mat& depth_map,
                                  const cv::Mat& normals,
                                  cv::viz::Viz3d* viz_3d) {
  CHECK(!depth_map.empty());
  CHECK_EQ(depth_map.type(), CV_32FC3);
  CHECK(!normals.empty());
  CHECK_EQ(normals.type(), CV_32FC3);
  CHECK_EQ(depth_map.size(), normals.size());
  CHECK_NOTNULL(viz_3d);
  viz_3d->setBackgroundColor(cv::viz::Color::gray());
  viz_3d->showWidget("cloud",
                     cv::viz::WCloud(depth_map, cv::viz::Color::red()));
  viz_3d->showWidget("normals",
                     cv::viz::WCloudNormals(depth_map, normals, 50, 0.02f,
                                            cv::viz::Color::green()));
  viz_3d->showWidget("coo", cv::viz::WCoordinateSystem(1.5));
  viz_3d->spinOnce(0, true);
}
// Computes the (unnormalized) 3x3 scatter matrix of the first
// neighborhood_size columns of 'neighborhood' (a 3xN CV_32F matrix of points)
// around the given mean. The result is not divided by N; this does not affect
// the eigenvectors used downstream, only the eigenvalue scale.
void computeCovariance(const cv::Mat& neighborhood, const cv::Vec3f& mean,
                       const size_t neighborhood_size, cv::Mat* covariance) {
  CHECK(!neighborhood.empty());
  CHECK_EQ(neighborhood.rows, 3u);
  CHECK_GT(neighborhood_size, 0u);
  CHECK_LE(neighborhood_size, neighborhood.cols);
  CHECK_NOTNULL(covariance);
  *covariance = cv::Mat::zeros(3, 3, CV_32F);
  for (size_t i = 0u; i < neighborhood_size; ++i) {
    // Center the i-th point on the mean.
    cv::Vec3f point;
    for (size_t row = 0u; row < neighborhood.rows; ++row) {
      point[row] = neighborhood.at<float>(row, i) - mean[row];
    }
    // Accumulate only the upper triangle (the matrix is symmetric).
    covariance->at<float>(0, 0) += point[0] * point[0];
    covariance->at<float>(0, 1) += point[0] * point[1];
    covariance->at<float>(0, 2) += point[0] * point[2];
    covariance->at<float>(1, 1) += point[1] * point[1];
    covariance->at<float>(1, 2) += point[1] * point[2];
    covariance->at<float>(2, 2) += point[2] * point[2];
  }
  // Assign the symmetric elements of the covariance matrix.
  covariance->at<float>(1, 0) = covariance->at<float>(0, 1);
  covariance->at<float>(2, 0) = covariance->at<float>(0, 2);
  covariance->at<float>(2, 1) = covariance->at<float>(1, 2);
}
// \brief Gathers the valid neighbors of pixel (x, y) within a square window.
//
// Scans a window_size x window_size window centered on (x, y) and collects
// every point whose Euclidean distance to the center point is below
// max_distance. Selected points are written column-wise into *neighborhood
// (3 x window_size^2, CV_32FC1; only the first 'returned' columns are valid)
// and accumulated into *mean, which is then divided by the neighbor count.
//
// \note *mean must be zero-initialized by the caller; it is accumulated here.
// \return number of collected neighbors (>= 1 whenever max_distance > 0,
//         since the center point itself is at distance 0).
size_t findNeighborhood(const cv::Mat& depth_map, const size_t window_size,
                        const float max_distance, const size_t x,
                        const size_t y, cv::Mat* neighborhood,
                        cv::Vec3f* mean) {
  CHECK(!depth_map.empty());
  CHECK_GT(window_size, 0u);
  CHECK_EQ(window_size % 2u, 1u);
  CHECK_GE(max_distance, 0.0f);
  CHECK_LT(x, static_cast<size_t>(depth_map.cols));
  CHECK_LT(y, static_cast<size_t>(depth_map.rows));
  CHECK_NOTNULL(neighborhood);
  CHECK_NOTNULL(mean);
  size_t neighborhood_size = 0u;
  *neighborhood = cv::Mat::zeros(3, window_size * window_size, CV_32FC1);
  const cv::Vec3f mid_point = depth_map.at<cv::Vec3f>(y, x);
  // Do the border-index arithmetic in signed integers. The previous
  // all-unsigned expression (y + y_idx - window_size / 2u) wrapped around for
  // pixels near the top/left border and only produced a negative index via
  // implementation-defined unsigned-to-int conversion.
  const int half_window = static_cast<int>(window_size / 2u);
  for (size_t y_idx = 0u; y_idx < window_size; ++y_idx) {
    const int y_filter_idx =
        static_cast<int>(y) + static_cast<int>(y_idx) - half_window;
    if (y_filter_idx < 0 || y_filter_idx >= depth_map.rows) {
      continue;
    }
    for (size_t x_idx = 0u; x_idx < window_size; ++x_idx) {
      const int x_filter_idx =
          static_cast<int>(x) + static_cast<int>(x_idx) - half_window;
      if (x_filter_idx < 0 || x_filter_idx >= depth_map.cols) {
        continue;
      }
      const cv::Vec3f filter_point =
          depth_map.at<cv::Vec3f>(y_filter_idx, x_filter_idx);
      // Euclidean distance between filter_point and the window center.
      const cv::Vec3f difference = mid_point - filter_point;
      const float euclidean_dist = cv::sqrt(difference.dot(difference));
      if (euclidean_dist < max_distance) {
        // Append filter_point as the next column of the neighborhood set.
        for (size_t coordinate = 0u; coordinate < 3u; ++coordinate) {
          neighborhood->at<float>(coordinate, neighborhood_size) =
              filter_point[coordinate];
        }
        ++neighborhood_size;
        *mean += filter_point;
      }
    }
  }
  CHECK_GE(neighborhood_size, 1u);
  CHECK_LE(neighborhood_size, window_size * window_size);
  *mean /= static_cast<float>(neighborhood_size);
  return neighborhood_size;
}
// \brief Compute point normals of a depth image.
//
// Compute the point normals by looking at a neighborhood around each pixel.
// We're taking a standard squared kernel, where we discard points that are too
// far away from the center point (by evaluating the Euclidean distance).
//
// \param params    surface-normal settings (window size, distance factor).
// \param depth_map CV_32FC3 map of 3D points; NaN or zero-depth entries are
//                  treated as invalid.
// \param normals   output CV_32FC3 normal map, pre-allocated to the same size
//                  as depth_map; invalid pixels are set to NaN.
void computeOwnNormals(const SurfaceNormalParams& params,
                       const cv::Mat& depth_map, cv::Mat* normals) {
  CHECK(!depth_map.empty());
  CHECK_EQ(depth_map.type(), CV_32FC3);
  CHECK_NOTNULL(normals);
  CHECK_EQ(depth_map.size(), normals->size());
  cv::Mat neighborhood =
      cv::Mat::zeros(3, params.window_size * params.window_size, CV_32FC1);
  cv::Mat eigenvalues;
  cv::Mat eigenvectors;
  cv::Mat covariance(3, 3, CV_32FC1);
  covariance = cv::Mat::zeros(3, 3, CV_32FC1);
  cv::Vec3f mean;
  cv::Vec3f mid_point;
  constexpr float float_nan = std::numeric_limits<float>::quiet_NaN();
  // Each thread's private() copies are default-constructed inside the region;
  // every one of them is fully (re)assigned in the loop before being read.
#pragma omp parallel for private(neighborhood, eigenvalues, eigenvectors, \
                                 covariance, mean, mid_point)
  for (size_t y = 0u; y < depth_map.rows; ++y) {
    for (size_t x = 0u; x < depth_map.cols; ++x) {
      mid_point = depth_map.at<cv::Vec3f>(y, x);
      // Skip point if z value is nan.
      if (cvIsNaN(mid_point[0]) || cvIsNaN(mid_point[1]) ||
          cvIsNaN(mid_point[2]) || (mid_point[2] == 0.0)) {
        normals->at<cv::Vec3f>(y, x) =
            cv::Vec3f(float_nan, float_nan, float_nan);
        continue;
      }
      // Distance cutoff grows linearly with depth (farther points are noisier).
      const float max_distance =
          params.distance_factor_threshold * mid_point[2];
      mean = cv::Vec3f(0.0f, 0.0f, 0.0f);
      const size_t neighborhood_size =
          findNeighborhood(depth_map, params.window_size, max_distance, x, y,
                           &neighborhood, &mean);
      if (neighborhood_size > 1u) {
        computeCovariance(neighborhood, mean, neighborhood_size, &covariance);
        // Compute Eigen vectors.
        cv::eigen(covariance, eigenvalues, eigenvectors);
        // Get the Eigenvector corresponding to the smallest Eigenvalue.
        // (cv::eigen sorts eigenvalues in descending order, so row 2 spans
        // the direction of least variance, i.e. the surface normal.)
        constexpr size_t n_th_eigenvector = 2u;
        for (size_t coordinate = 0u; coordinate < 3u; ++coordinate) {
          normals->at<cv::Vec3f>(y, x)[coordinate] =
              eigenvectors.at<float>(n_th_eigenvector, coordinate);
        }
        // Re-Orient normals to point towards camera.
        if (normals->at<cv::Vec3f>(y, x)[2] > 0.0f) {
          normals->at<cv::Vec3f>(y, x) = -normals->at<cv::Vec3f>(y, x);
        }
      } else {
        // Too few neighbors to fit a plane; mark the normal as invalid.
        normals->at<cv::Vec3f>(y, x) =
            cv::Vec3f(float_nan, float_nan, float_nan);
      }
    }
  }
}
} // namespace depth_segmentation
#endif // DEPTH_SEGMENTATION_COMMON_H_
|
Percent.h | /*
open source routing machine
Copyright (C) Dennis Luxen, others 2010
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU AFFERO General Public License as published by
the Free Software Foundation; either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
or see http://www.gnu.org/licenses/agpl.txt.
*/
#ifndef PERCENT_H
#define PERCENT_H
#include <iostream>
#ifdef _OPENMP
#include <omp.h>
#endif
/**
 * Textual progress indicator. Prints a dot per 'step' percent of progress and
 * the percentage itself every 10%.
 */
class Percent
{
public:
    /**
     * Constructor.
     * @param maxValue the value that corresponds to 100%
     * @param step the progress is shown in steps of 'step' percent
     */
    Percent(unsigned maxValue, unsigned step = 5) {
        reinit(maxValue, step);
    }

    /** Reinitializes this object. */
    void reinit(unsigned maxValue, unsigned step = 5) {
        // maxValue == 0 would divide by zero in printStatus(); clamp to 1.
        _maxValue = (maxValue == 0 ? 1 : maxValue);
        _current_value = 0;
        _intervalPercent = _maxValue / 100;
        // For maxValue < 100 the integer division yields 0, which would make
        // printStatus() hit the threshold on every call; clamp to 1.
        if (_intervalPercent == 0)
            _intervalPercent = 1;
        _nextThreshold = _intervalPercent;
        _lastPercent = 0;
        // A step of 0 would make printPercent() spin forever; clamp to 1.
        _step = (step == 0 ? 1 : step);
    }

    /** If there has been significant progress, display it. */
    void printStatus(unsigned currentValue) {
        if (currentValue >= _nextThreshold) {
            _nextThreshold += _intervalPercent;
            printPercent( currentValue / (double)_maxValue * 100 );
        }
        if (currentValue + 1 == _maxValue)
            std::cout << " 100%" << std::endl;
    }

    /** Counts one unit of work and displays the progress. */
    void printIncrement()
    {
#pragma omp atomic
        _current_value++;
        // NOTE(review): this read is not atomic with the increment above, so
        // two OpenMP threads may print the same value; harmless for a
        // progress display, but worth knowing.
        printStatus(_current_value);
    }
private:
    unsigned _current_value;    // units of work completed so far
    unsigned _maxValue;         // value corresponding to 100% (>= 1)
    unsigned _intervalPercent;  // value distance between thresholds (>= 1)
    unsigned _nextThreshold;    // next value at which progress is printed
    unsigned _lastPercent;      // last percentage already displayed
    unsigned _step;             // display granularity in percent (>= 1)

    /** Displays the new progress. */
    void printPercent(double percent) {
        while (percent >= _lastPercent+_step) {
            _lastPercent+=_step;
            if (_lastPercent % 10 == 0) {
                std::cout << " " << _lastPercent << "% ";
            }
            else {
                std::cout << ".";
            }
            std::cout.flush();
        }
    }
};
#endif // PERCENT_H
|
target_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target simd'}}
#pragma omp target simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target simd'}}
#pragma omp target simd foo
// Plain '#pragma omp target simd': accepted on a for loop, rejected elsewhere.
void test_no_clause() {
  int i;
#pragma omp target simd
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{statement after '#pragma omp target simd' must be a for loop}}
#pragma omp target simd
  ++i;
}
// Branches may not leave the OpenMP region; labels inside remain usable.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp target simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown trailing tokens on the directive are ignored with a warning.
void test_invalid_clause() {
  int i;
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
#pragma omp target simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}
// Stray punctuation after the directive or its clauses is ignored with a warning.
void test_non_identifiers() {
  int i, x;
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
#pragma omp target simd;
  for (i = 0; i < 16; ++i)
    ;
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
#pragma omp target simd private(x);
  for (i = 0; i < 16; ++i)
    ;
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
#pragma omp target simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
// Exercises malformed and well-formed 'collapse' clauses, including the
// required number of nested loops.
void test_collapse() {
  int i;
  // expected-error@+1 {{expected '('}}
#pragma omp target simd collapse
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd collapse(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target simd collapse()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-warning@+2 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
  // expected-error@+1 {{expected '('}}
#pragma omp target simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
  // expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
#pragma omp target simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// Exercises malformed and well-formed 'private' clauses.
void test_private() {
  int i;
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd private(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp target simd private(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 2 {{expected expression}}
#pragma omp target simd private(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target simd private()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target simd private(int)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected variable name}}
#pragma omp target simd private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}
// Exercises malformed and well-formed 'lastprivate' clauses.
void test_lastprivate() {
  int i;
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 {{expected expression}}
#pragma omp target simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp target simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 2 {{expected expression}}
#pragma omp target simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected variable name}}
#pragma omp target simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// Exercises malformed 'firstprivate' clauses and valid combinations with
// 'lastprivate'.
void test_firstprivate() {
  int i;
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 {{expected expression}}
#pragma omp target simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp target simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 2 {{expected expression}}
#pragma omp target simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected variable name}}
#pragma omp target simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// Loop variables of floating-point type are rejected.
void test_loop_messages() {
  float a[100], b[100], c[100];
  // expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
  // expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
// Exercises malformed and well-formed 'safelen' clauses.
void test_safelen() {
  int i;
  // expected-error@+1 {{expected '('}}
#pragma omp target simd safelen
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target simd safelen()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-warning@+2 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
  // expected-error@+1 {{expected '('}}
#pragma omp target simd safelen 4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4 4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd safelen(2.5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd safelen(foo())
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target simd safelen(-5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target simd safelen(0)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target simd safelen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// Exercises malformed and well-formed 'simdlen' clauses.
void test_simdlen() {
  int i;
  // expected-error@+1 {{expected '('}}
#pragma omp target simd simdlen
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target simd simdlen()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-warning@+2 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
  // expected-error@+1 {{expected '('}}
#pragma omp target simd simdlen 4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4 4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd simdlen(2.5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd simdlen(foo())
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target simd simdlen(-5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target simd simdlen(0)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target simd simdlen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// 'simdlen' must not exceed 'safelen', in either clause order.
void test_safelen_simdlen() {
  int i;
  // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target simd simdlen(6) safelen(5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target simd safelen(5) simdlen(6)
  for (i = 0; i < 16; ++i)
    ;
}
|
linear.c | #include "linear.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <lauxlib.h>
#include <cblas.h>
#include <lapacke.h>
/* matrix orders */
/* matrix orders accepted from Lua; index 0 = row-major, 1 = column-major */
static const char * const ORDERS[] = { "row", "col", NULL };

/* checks an order option at the given stack index; defaults to "row" */
static CBLAS_ORDER checkorder (lua_State *L, int index) {
	switch (luaL_checkoption(L, index, "row", ORDERS)) {
	case 0:
		return CblasRowMajor;
	case 1:
		return CblasColMajor;
	}
	/* not reached; luaL_checkoption raises on any other value */
	assert(0);
	return (CBLAS_ORDER)0;
}

/* checks a transpose option at the given stack index; defaults to "notrans" */
static CBLAS_TRANSPOSE checktranspose (lua_State *L, int index) {
	static const char * const TRANSPOSES[] = { "notrans", "trans", NULL };

	switch (luaL_checkoption(L, index, "notrans", TRANSPOSES)) {
	case 0:
		return CblasNoTrans;
	case 1:
		return CblasTrans;
	}
	/* not reached; luaL_checkoption raises on any other value */
	assert(0);
	return (CBLAS_TRANSPOSE)0;
}

/* translates a CBLAS transpose constant into the LAPACK character code */
static char lapacktranspose (CBLAS_TRANSPOSE transpose) {
	switch (transpose) {
	case CblasNoTrans:
		return 'N';
	case CblasTrans:
		return 'T';
	default:
		/* not reached; only the two values above are produced here */
		assert(0);
		return '\0';
	}
}
/* returns an int value from a table */
/* returns an int value from the table on top of the stack; a negative dfl
   marks the field as required (raises if missing); raises on non-integer
   values. NOTE(review): the lua_Integer result is narrowed to int. */
static int intvalue (lua_State *L, const char *key, int dfl) {
	int result, isinteger;

	lua_getfield(L, -1, key);
	if (!lua_isnil(L, -1)) {
		result = lua_tointegerx(L, -1, &isinteger);
		if (!isinteger) {
			luaL_error(L, "bad field " LUA_QS, key);
		}
	} else {
		if (dfl < 0) {
			/* luaL_error does not return */
			luaL_error(L, "missing field " LUA_QS, key);
		}
		result = dfl;
	}
	lua_pop(L, 1);
	return result;
}

/* returns the index of an option string from the table on top of the stack;
   a NULL dfl marks the field as required; raises on unknown options */
static int optionvalue (lua_State *L, const char *key, const char *dfl,
		const char *options[]) {
	const char *str;
	int i;

	lua_getfield(L, -1, key);
	if (!lua_isnil(L, -1)) {
		str = lua_tostring(L, -1);
		if (str == NULL) {
			luaL_error(L, "bad field " LUA_QS, key);
		}
	} else {
		if (dfl == NULL) {
			luaL_error(L, "missing field " LUA_QS, key);
		}
		str = dfl;
	}
	lua_pop(L, 1);
	for (i = 0; options[i] != NULL; i++) {
		if (strcmp(options[i], str) == 0) {
			return i;
		}
	}
	luaL_error(L, "bad option " LUA_QS " in field " LUA_QS, str, key);
	return 0; /* not reached */
}

/* raises a linear argument error for a non-vector, non-matrix argument */
static int argerror (lua_State *L, int index) {
	return luaL_argerror(L, index, lua_pushfstring(L, "vector, or matrix "
			"expected, got %s", luaL_typename(L, index)));
}
/* pushes a new vector onto the stack (thin wrapper around the public API) */
static struct vector *newvector (lua_State *L, int size) {
	return lualinear_newvector(L, size);
}

/* pushes a vector wrapping existing storage onto the stack */
static struct vector *wrapvector (lua_State *L, int size, float *values) {
	return lualinear_wrapvector(L, size, values);
}

/* creates a new vector; Lua signature: vector(size) */
static int vector (lua_State *L) {
	int size;

	/* process arguments */
	size = luaL_checkinteger(L, 1);
	luaL_argcheck(L, size >= 1, 1, "bad dimension");

	/* create */
	newvector(L, size);
	return 1;
}

/* vector length implementation (# operator) */
static int vector_len (lua_State *L) {
	struct vector *x;

	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	lua_pushinteger(L, x->size);
	return 1;
}

/* vector index implementation; 1-based, honors the stride x->inc */
static int vector_index (lua_State *L) {
	struct vector *x;
	int index;

	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	index = luaL_checkinteger(L, 2);
	luaL_argcheck(L, index >= 1 && index <= x->size, 2, "bad index");
	lua_pushnumber(L, x->values[(size_t)(index - 1) * x->inc]);
	return 1;
}

/* vector newindex implementation; 1-based, honors the stride x->inc */
static int vector_newindex (lua_State *L) {
	struct vector *x;
	int index;
	float value;

	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	index = luaL_checkinteger(L, 2);
	luaL_argcheck(L, index >= 1 && index <= x->size, 2, "bad index");
	value = luaL_checknumber(L, 3);
	x->values[(size_t)(index - 1) * x->inc] = value;
	return 0;
}
/* vector next function: iterator step used by vector_ipairs; returns
   (index + 1, value) while elements remain, nil at the end */
static int vector_next (lua_State *L) {
	struct vector *x;
	int index;

	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	index = luaL_checkinteger(L, 2);
	if (index >= 0 && index < x->size) {
		lua_pushinteger(L, index + 1);
		/* honor the stride x->inc (as vector_index does); non-unit
		   strides occur for wrapped vectors such as tvector() results,
		   which the previous code iterated incorrectly */
		lua_pushnumber(L, x->values[(size_t)index * x->inc]);
		return 2;
	}
	lua_pushnil(L);
	return 1;
}
/* vector ipairs function; returns (vector_next, vector, 0) */
static int vector_ipairs (lua_State *L) {
	luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	lua_pushcfunction(L, vector_next);
	lua_pushvalue(L, 1);
	lua_pushinteger(L, 0);
	return 3;
}

/* returns the string representation of a vector ("vector: <address>") */
static int vector_tostring (lua_State *L) {
	struct vector *x;

	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	lua_pushfstring(L, "vector: %p", x);
	return 1;
}

/* frees a vector; owned storage is freed, wrapped storage only unrefs the
   owner so the GC can reclaim it */
static int vector_free (lua_State *L) {
	struct vector *x;

	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x->ref == LUA_NOREF) {
		free(x->values);
	} else {
		luaL_unref(L, LUA_REGISTRYINDEX, x->ref);
	}
	return 0;
}
/* pushes a new matrix onto the stack (thin wrapper around the public API) */
static struct matrix *newmatrix (lua_State *L, int rows, int cols,
		CBLAS_ORDER order) {
	return lualinear_newmatrix(L, rows, cols, order);
}

/* pushes a matrix wrapping existing storage onto the stack */
static struct matrix *wrapmatrix (lua_State *L, int rows, int cols,
		CBLAS_ORDER order, float *values) {
	return lualinear_wrapmatrix(L, rows, cols, order, values);
}

/* creates a new matrix; Lua signature: matrix(rows, cols [, order]) */
static int matrix (lua_State *L) {
	int rows, cols;
	CBLAS_ORDER order;

	/* process arguments */
	rows = luaL_checkinteger(L, 1);
	luaL_argcheck(L, rows >= 1, 1, "bad dimension");
	cols = luaL_checkinteger(L, 2);
	luaL_argcheck(L, cols >= 1, 2, "bad dimension");
	order = checkorder(L, 3);

	/* create */
	newmatrix(L, rows, cols, order);
	return 1;
}

/* returns the length of a matrix: the number of major-order vectors, i.e.
   rows for row-major and columns for column-major matrices */
static int matrix_len (lua_State *L) {
	struct matrix *X;

	X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	switch (X->order) {
	case CblasRowMajor:
		lua_pushinteger(L, X->rows);
		break;
	case CblasColMajor:
		lua_pushinteger(L, X->cols);
		break;
	}
	return 1;
}
/* matrix index implementation: returns the index-th major-order vector (a
   row for row-major, a column for column-major matrices) as a wrapped
   vector that keeps a registry reference to the matrix */
static int matrix_index (lua_State *L) {
	struct matrix *X;
	int index, size;
	struct vector *x;

	/* process arguments */
	X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	index = luaL_checkinteger(L, 2);
	luaL_argcheck(L, index >= 1, 2, "bad index");
	switch (X->order) {
	case CblasRowMajor:
		luaL_argcheck(L, index <= X->rows, 2, "bad index");
		size = X->cols;
		break;
	case CblasColMajor:
		luaL_argcheck(L, index <= X->cols, 2, "bad index");
		size = X->rows;
		break;
	default:
		/* not reached */
		size = -1;
		assert(0);
	}

	/* create vector; the registry ref pins the matrix for the vector's
	   lifetime */
	x = wrapvector(L, size, &X->values[(size_t)(index - 1) * X->ld]);
	lua_pushvalue(L, 1);
	x->ref = luaL_ref(L, LUA_REGISTRYINDEX);
	return 1;
}

/* matrix next function: iterator step used by matrix_ipairs; yields one
   major-order vector per step */
static int matrix_next (lua_State *L) {
	struct matrix *X;
	int index, majorsize, minorsize;
	struct vector *x;

	X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	index = luaL_checkinteger(L, 2);
	switch (X->order) {
	case CblasRowMajor:
		majorsize = X->rows;
		minorsize = X->cols;
		break;
	case CblasColMajor:
		majorsize = X->cols;
		minorsize = X->rows;
		break;
	default:
		/* not reached */
		assert(0);
		return 0;
	}
	if (index >= 0 && index < majorsize) {
		lua_pushinteger(L, index + 1);
		x = wrapvector(L, minorsize, &X->values[(size_t)index * X->ld]);
		lua_pushvalue(L, 1);
		x->ref = luaL_ref(L, LUA_REGISTRYINDEX);
		return 2;
	}
	lua_pushnil(L);
	return 1;
}
/* matrix ipairs function; returns (matrix_next, matrix, 0) */
static int matrix_ipairs (lua_State *L) {
	luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	lua_pushcfunction(L, matrix_next);
	lua_pushvalue(L, 1);
	lua_pushinteger(L, 0);
	return 3;
}

/* returns the string representation of a matrix ("matrix: <address>") */
static int matrix_tostring (lua_State *L) {
	struct matrix *X;

	X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	lua_pushfstring(L, "matrix: %p", X);
	return 1;
}

/* frees a matrix; owned storage is freed, wrapped storage only unrefs the
   owner so the GC can reclaim it */
static int matrix_free (lua_State *L) {
	struct matrix *X;

	X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X->ref == LUA_NOREF) {
		free(X->values);
	} else {
		luaL_unref(L, LUA_REGISTRYINDEX, X->ref);
	}
	return 0;
}

/* returns the type of a linear object: "vector", "matrix", or nil */
static int type (lua_State *L) {
	if (luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE) != NULL) {
		lua_pushliteral(L, "vector");
		return 1;
	}
	if (luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE) != NULL) {
		lua_pushliteral(L, "matrix");
		return 1;
	}
	lua_pushnil(L);
	return 1;
}
/* returns the size of a linear object: the length for a vector, or
 * (rows, cols, order name) for a matrix */
static int size (lua_State *L) {
	struct vector *x;
	struct matrix *X;
	x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x != NULL) {
		lua_pushinteger(L, x->size);
		return 1;
	}
	X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X != NULL) {
		lua_pushinteger(L, X->rows);
		lua_pushinteger(L, X->cols);
		/* ORDERS presumably maps 0 -> "rowmajor", 1 -> "colmajor";
		 * defined earlier in the file, not visible here -- confirm */
		lua_pushstring(L, ORDERS[X->order == CblasRowMajor ? 0 : 1]);
		return 3;
	}
	return argerror(L, 1);
}
/* transposed vector: returns a strided view running across the major
 * dimension (a column of a row-major matrix, a row of a column-major
 * one); the view's stride is the matrix leading dimension */
static int tvector (lua_State *L) {
	struct matrix *X;
	int index, size;
	struct vector *x;
	/* process arguments */
	X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	index = luaL_checkinteger(L, 2);
	luaL_argcheck(L, index >= 1, 2, "bad index");
	switch (X->order) {
	case CblasRowMajor:
		luaL_argcheck(L, index <= X->cols, 2, "bad index");
		size = X->rows;
		break;
	case CblasColMajor:
		luaL_argcheck(L, index <= X->rows, 2, "bad index");
		size = X->cols;
		break;
	default:
		/* not reached */
		size = -1;
		assert(0);
	}
	/* create vector; inc = ld makes it step one major line at a time */
	x = wrapvector(L, size, &X->values[index - 1]);
	x->inc = X->ld;
	/* anchor the matrix so its storage outlives the view */
	lua_pushvalue(L, 1);
	x->ref = luaL_ref(L, LUA_REGISTRYINDEX);
	return 1;
}
/* subvector or submatrix: returns a view sharing storage with the
 * argument. For vectors: sub(x [, start [, end]]), 1-based inclusive.
 * For matrices the index argument order follows the storage order
 * (major index first), all four bounds optional. */
static int sub (lua_State *L) {
	struct vector *x, *s;
	struct matrix *X, *S;
	/* process arguments */
	x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x != NULL) {
		int start, end;
		start = luaL_optint(L, 2, 1);
		luaL_argcheck(L, start >= 1 && start <= x->size, 2,
				"bad index");
		end = luaL_optint(L, 3, x->size);
		luaL_argcheck(L, end >= start && end <= x->size, 3,
				"bad index");
		/* view keeps the original stride */
		s = wrapvector(L, end - start + 1, &x->values[
				(size_t)(start - 1) * x->inc]);
		s->inc = x->inc;
		lua_pushvalue(L, 1);
		s->ref = luaL_ref(L, LUA_REGISTRYINDEX);
		return 1;
	}
	X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X != NULL) {
		int rowstart, rowend, colstart, colend;
		switch (X->order){
		case CblasRowMajor:
			/* arguments: rowstart, colstart, rowend, colend */
			rowstart = luaL_optint(L, 2, 1);
			luaL_argcheck(L, rowstart >= 1 && rowstart <= X->rows,
					2, "bad index");
			colstart = luaL_optint(L, 3, 1);
			luaL_argcheck(L, colstart >= 1 && colstart <= X->cols,
					3, "bad index");
			rowend = luaL_optint(L, 4, X->rows);
			luaL_argcheck(L, rowend >= rowstart && rowend
					<= X->rows, 4, "bad index");
			colend = luaL_optint(L, 5, X->cols);
			luaL_argcheck(L, colend >= colstart && colend
					<= X->cols, 5, "bad index");
			S = wrapmatrix(L, rowend - rowstart + 1, colend
					- colstart + 1, X->order, &X->values[
					(size_t)(rowstart - 1) * X->ld
					+ colstart - 1]);
			break;
		case CblasColMajor:
			/* arguments: colstart, rowstart, colend, rowend */
			colstart = luaL_optint(L, 2, 1);
			luaL_argcheck(L, colstart >= 1 && colstart <= X->cols,
					2, "bad index");
			rowstart = luaL_optint(L, 3, 1);
			luaL_argcheck(L, rowstart >= 1 && rowstart <= X->rows,
					3, "bad index");
			colend = luaL_optint(L, 4, X->cols);
			luaL_argcheck(L, colend >= colstart && colend
					<= X->cols, 4, "bad index");
			rowend = luaL_optint(L, 5, X->rows);
			luaL_argcheck(L, rowend >= rowstart && rowend
					<= X->rows, 5, "bad index");
			S = wrapmatrix(L, rowend - rowstart + 1, colend
					- colstart + 1, X->order, &X->values[
					(size_t)(colstart - 1) * X->ld
					+ rowstart - 1]);
			break;
		default:
			/* not reached */
			assert(0);
			return 0;
		}
		/* the submatrix keeps the parent's leading dimension */
		S->ld = X->ld;
		lua_pushvalue(L, 1);
		S->ref = luaL_ref(L, LUA_REGISTRYINDEX);
		return 1;
	}
	return argerror(L, 1);
}
/* unwinds matrices into a vector: copies the elements of the matrix
 * arguments, in their storage order, into the vector passed as the
 * last argument until the vector is full */
static int unwind (lua_State *L) {
	struct vector *x;
	int index, i, j, k;
	size_t base;
	struct matrix *X;
	if (lua_gettop(L) == 0) {
		return luaL_error(L, "wrong number of arguments");
	}
	x = luaL_checkudata(L, lua_gettop(L), LUALINEAR_VECTOR_METATABLE);
	index = 1;
	i = 0;
	while (i < x->size) {
		X = luaL_checkudata(L, index, LUALINEAR_MATRIX_METATABLE);
		/* NOTE(review): rows * cols is evaluated in int and could
		 * overflow for very large matrices -- confirm acceptable */
		luaL_argcheck(L, X->rows * X->cols <= x->size - i, index,
				"matrix too large");
		switch (X->order) {
		case CblasRowMajor:
			for (j = 0; j < X->rows; j++) {
				base = (size_t)j * X->ld;
				for (k = 0; k < X->cols; k++) {
					x->values[(size_t)i * x->inc]
							= X->values[base + k];
					i++;
				}
			}
			break;
		case CblasColMajor:
			for (j = 0; j < X->cols; j++) {
				base = (size_t)j * X->ld;
				for (k = 0; k < X->rows; k++) {
					x->values[(size_t)i * x->inc]
							= X->values[base + k];
					i++;
				}
			}
			break;
		}
		index++;
	}
	return 0;
}
/* reshapes a vector into matrices: the inverse of unwind; copies the
 * vector's elements, in storage order, into the matrix arguments until
 * the vector is consumed */
static int reshape (lua_State *L) {
	struct vector *x;
	int index, i, j, k;
	size_t base;
	struct matrix *X;
	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	index = 2;
	i = 0;
	while (i < x->size) {
		X = luaL_checkudata(L, index, LUALINEAR_MATRIX_METATABLE);
		/* NOTE(review): rows * cols is evaluated in int and could
		 * overflow for very large matrices -- confirm acceptable */
		luaL_argcheck(L, X->rows * X->cols <= x->size - i, index,
				"matrix too large");
		switch (X->order) {
		case CblasRowMajor:
			for (j = 0; j < X->rows; j++) {
				base = (size_t)j * X->ld;
				for (k = 0; k < X->cols; k++) {
					X->values[base + k] = x->values[
							(size_t)i * x->inc];
					i++;
				}
			}
			break;
		case CblasColMajor:
			for (j = 0; j < X->cols; j++) {
				base = (size_t)j * X->ld;
				for (k = 0; k < X->rows; k++) {
					X->values[base + k] = x->values[
							(size_t)i * x->inc];
					i++;
				}
			}
			break;
		}
		index++;
	}
	return 0;
}
/* converts a vector or matrix to a plain Lua table, the inverse of
 * tolinear: vectors become { type, length, values }, matrices become
 * { type, rows, cols, order, values } with nested value tables ordered
 * along the storage order */
static int totable (lua_State *L) {
	struct vector *x;
	struct matrix *X;
	int i, j;
	const float *value;
	/* check and process arguments */
	x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x != NULL) {
		lua_createtable(L, 0, 3);
		lua_pushliteral(L, "vector");
		lua_setfield(L, -2, "type");
		lua_pushinteger(L, x->size);
		lua_setfield(L, -2, "length");
		lua_createtable(L, x->size, 0);
		value = x->values;
		for (i = 0; i < x->size; i++) {
			lua_pushnumber(L, *value);
			lua_rawseti(L, -2, i + 1);
			value += x->inc;
		}
		lua_setfield(L, -2, "values");
		return 1;
	}
	X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X != NULL) {
		lua_createtable(L, 0, 5);
		lua_pushliteral(L, "matrix");
		lua_setfield(L, -2, "type");
		lua_pushinteger(L, X->rows);
		lua_setfield(L, -2, "rows");
		lua_pushinteger(L, X->cols);
		lua_setfield(L, -2, "cols");
		switch (X->order) {
		case CblasRowMajor:
			lua_pushliteral(L, "rowmajor");
			lua_setfield(L, -2, "order");
			/* one inner table per row */
			lua_createtable(L, X->rows, 0);
			for (i = 0; i < X->rows; i++) {
				lua_createtable(L, X->cols, 0);
				value = &X->values[(size_t)i * X->ld];
				for (j = 0; j < X->cols; j++) {
					lua_pushnumber(L, *value++);
					lua_rawseti(L, -2, j + 1);
				}
				lua_rawseti(L, -2, i + 1);
			}
			lua_setfield(L, -2, "values");
			break;
		case CblasColMajor:
			lua_pushliteral(L, "colmajor");
			lua_setfield(L, -2, "order");
			/* one inner table per column */
			lua_createtable(L, X->cols, 0);
			for (i = 0; i < X->cols; i++) {
				lua_createtable(L, X->rows, 0);
				value = &X->values[(size_t)i * X->ld];
				for (j = 0; j < X->rows; j++) {
					lua_pushnumber(L, *value++);
					lua_rawseti(L, -2, j + 1);
				}
				lua_rawseti(L, -2, i + 1);
			}
			lua_setfield(L, -2, "values");
			break;
		}
		return 1;
	}
	return argerror(L, 1);
}
/* converts a table (as produced by totable) to a vector or matrix;
 * raises a Lua error on any missing or malformed field. Fix: the
 * row/column base offset is now computed in size_t, consistent with
 * the rest of the file, so large matrices cannot overflow int. */
static int tolinear (lua_State *L) {
	static const char *types[] = { "vector", "matrix", NULL };
	static const char *orders[] = { "rowmajor", "colmajor", NULL };
	struct vector *x;
	struct matrix *X;
	int size, rows, cols, major, minor;
	CBLAS_ORDER order;
	int i, j;
	int isnum;
	float *value;
	/* check arguments */
	luaL_checktype(L, 1, LUA_TTABLE);
	lua_settop(L, 1);
	/* handle types */
	switch (optionvalue(L, "type", NULL, types)) {
	case 0: /* vector */
		size = intvalue(L, "length", -1);
		if (size < 1) {
			return luaL_error(L, "bad field " LUA_QS, "length");
		}
		x = newvector(L, size);
		lua_getfield(L, 1, "values");
		if (lua_type(L, -1) != LUA_TTABLE) {
			return luaL_error(L, "bad field " LUA_QS, "values");
		}
		value = x->values;
		for (i = 0; i < size; i++) {
			lua_rawgeti(L, -1, i + 1);
			*value++ = lua_tonumberx(L, -1, &isnum);
			if (!isnum) {
				return luaL_error(L, "bad value at index %d",
						i + 1);
			}
			lua_pop(L, 1);
		}
		lua_pop(L, 1);
		return 1;
	case 1: /* matrix */
		rows = intvalue(L, "rows", -1);
		if (rows < 1) {
			return luaL_error(L, "bad field " LUA_QS, "rows");
		}
		cols = intvalue(L, "cols", -1);
		if (cols < 1) {
			return luaL_error(L, "bad field " LUA_QS, "cols");
		}
		switch (optionvalue(L, "order", NULL, orders)) {
		case 0:
			order = CblasRowMajor;
			major = rows;
			minor = cols;
			break;
		case 1:
			order = CblasColMajor;
			major = cols;
			minor = rows;
			break;
		default:
			/* not reached */
			assert(0);
			return 0;
		}
		X = newmatrix(L, rows, cols, order);
		lua_getfield(L, 1, "values");
		if (lua_type(L, -1) != LUA_TTABLE) {
			return luaL_error(L, "bad field " LUA_QS, "values");
		}
		for (i = 0; i < major; i++) {
			/* (size_t) cast avoids int overflow of the offset
			 * for large matrices */
			value = &X->values[(size_t)i * X->ld];
			lua_rawgeti(L, -1, i + 1);
			if (lua_type(L, -1) != LUA_TTABLE) {
				return luaL_error(L, "bad value at index %d",
						i + 1);
			}
			for (j = 0; j < minor; j++) {
				lua_rawgeti(L, -1, j + 1);
				*value++ = lua_tonumberx(L, -1, &isnum);
				if (!isnum) {
					return luaL_error(L, "bad value at "
							"index (%d,%d)", i + 1,
							j + 1);
				}
				lua_pop(L, 1);
			}
			lua_pop(L, 1);
		}
		lua_pop(L, 1);
		return 1;
	}
	/* not reached */
	assert(0);
	return 0;
}
/* invokes the DOT subprogram (x' y); both vectors must have the same
 * length */
static int dot (lua_State *L) {
	struct vector *x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	struct vector *y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE);
	luaL_argcheck(L, y->size == x->size, 2, "dimension mismatch");
	lua_pushnumber(L, cblas_sdot(x->size, x->values, x->inc,
			y->values, y->inc));
	return 1;
}
/* invokes the NRM2 subprogram (||x||_2) */
static int nrm2 (lua_State *L) {
	struct vector *x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	lua_pushnumber(L, cblas_snrm2(x->size, x->values, x->inc));
	return 1;
}
/* invokes the ASUM subprogram (sigma |x|) */
static int asum (lua_State *L) {
	struct vector *x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	lua_pushnumber(L, cblas_sasum(x->size, x->values, x->inc));
	return 1;
}
/* invokes the IAMAX subprogram (argmax |x|) */
static int iamax (lua_State *L) {
	struct vector *x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	/* CBLAS returns a zero-based position; Lua indices are one-based */
	lua_pushinteger(L, cblas_isamax(x->size, x->values, x->inc) + 1);
	return 1;
}
/* sum implementation: adds up size elements of values, inc apart;
 * parallelized for large inputs via the OpenMP reduction */
static float _sum (const float *values, int size, int inc) {
	float total = 0.0;
	int i;
	#pragma omp parallel for private(i) schedule(auto) \
			if(size >= LUALINEAR_OMP_MINSIZE) reduction(+:total)
	for (i = 0; i < size; i++) {
		total += values[(size_t)i * inc];
	}
	return total;
}
/* sum function (sigma x_i): for a vector, returns the scalar sum; for a
 * matrix, writes per-row or per-column sums into the vector passed as
 * second argument, with an optional transpose flag in argument 3 */
static int sum (lua_State *L) {
	struct vector *x, *y;
	struct matrix *X;
	int i;
	/* check and process arguments */
	x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x != NULL) {
		lua_pushnumber(L, _sum(x->values, x->size, x->inc));
		return 1;
	}
	X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X != NULL) {
		y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE);
		switch (checktranspose(L, 3)) {
		case CblasNoTrans:
			/* sum along the minor (contiguous) dimension */
			switch (X->order) {
			case CblasRowMajor:
				luaL_argcheck(L, y->size == X->rows, 2,
						"dimension mismatch");
				for (i = 0; i < X->rows; i++) {
					y->values[(size_t)i * y->inc] = _sum(
							&X->values[(size_t)i
							* X->ld], X->cols, 1);
				}
				break;
			case CblasColMajor:
				luaL_argcheck(L, y->size == X->cols, 2,
						"dimension mismatch");
				for (i = 0; i < X->cols; i++) {
					y->values[(size_t)i * y->inc] = _sum(
							&X->values[(size_t)i
							* X->ld], X->rows, 1);
				}
				break;
			}
			break;
		case CblasTrans:
			/* sum along the major dimension: stride is ld */
			switch (X->order) {
			case CblasRowMajor:
				luaL_argcheck(L, y->size == X->cols, 2,
						"dimension mismatch");
				for (i = 0; i < X->cols; i++) {
					y->values[(size_t)i * y->inc] = _sum(
							&X->values[(size_t)i],
							X->rows, X->ld);
				}
				break;
			case CblasColMajor:
				luaL_argcheck(L, y->size == X->rows, 2,
						"dimension mismatch");
				for (i = 0; i < X->rows; i++) {
					y->values[(size_t)i * y->inc] = _sum(
							&X->values[(size_t)i],
							X->cols, X->ld);
				}
				break;
			}
			break;
		default:
			/* not reached */
			assert(0);
			break;
		}
		return 0;
	}
	return argerror(L, 1);
}
/* xy function */
typedef void(*xyfunction)(int, float *, int, float *, int, float);
/* invokes an (x,y) subprogram: generic driver that dispatches s over a
 * vector-vector, vector-matrix (per row/column, honoring an optional
 * transpose argument) or matrix-matrix operand pair, with an optional
 * trailing alpha scalar (default 1.0).
 * Fix: the four vector-matrix dimension checks called
 * luaL_argcheck(L, 1, cond, msg) with the condition and the argument
 * index swapped, so they always passed; they now actually validate. */
static int xy (lua_State *L, xyfunction s, int hasy, int hasalpha) {
	int index, i;
	float alpha;
	struct vector *x, *y;
	struct matrix *X, *Y;
	/* check and process arguments */
	index = 2;
	x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x != NULL) {
		if (hasy) {
			/* second operand may be a vector or a matrix */
			y = luaL_testudata(L, 2, LUALINEAR_VECTOR_METATABLE);
			Y = luaL_testudata(L, 2, LUALINEAR_MATRIX_METATABLE);
			if (y == NULL && Y == NULL) {
				return argerror(L, 2);
			}
			index++;
		} else {
			y = x;
			Y = NULL;
		}
		if (hasalpha) {
			alpha = luaL_optnumber(L, index, 1.0);
			index++;
		} else {
			alpha = 0.0;
		}
		if (y != NULL) {
			/* invoke subprogram on vector-vector */
			luaL_argcheck(L, y->size == x->size, 2,
					"dimension mismatch");
			s(x->size, x->values, x->inc, y->values, y->inc, alpha);
			return 0;
		}
		/* invoke subprogram on vector-matrix, broadcasting x over
		 * each major line (or each minor line when transposed) */
		switch (checktranspose(L, index)) {
		case CblasNoTrans:
			switch (Y->order) {
			case CblasRowMajor:
				luaL_argcheck(L, x->size == Y->cols, 2,
						"dimension mismatch");
				for (i = 0; i < Y->rows; i++) {
					s(x->size, x->values, x->inc,
							&Y->values[(size_t)i
							* Y->ld], 1, alpha);
				}
				break;
			case CblasColMajor:
				luaL_argcheck(L, x->size == Y->rows, 2,
						"dimension mismatch");
				for (i = 0; i < Y->cols; i++) {
					s(x->size, x->values, x->inc,
							&Y->values[(size_t)i
							* Y->ld], 1, alpha);
				}
				break;
			}
			break;
		case CblasTrans:
			switch (Y->order) {
			case CblasRowMajor:
				luaL_argcheck(L, x->size == Y->rows, 2,
						"dimension mismatch");
				for (i = 0; i < Y->rows; i++) {
					s(x->size, x->values, x->inc,
							&Y->values[(size_t)i],
							Y->ld, alpha);
				}
				break;
			case CblasColMajor:
				luaL_argcheck(L, x->size == Y->cols, 2,
						"dimension mismatch");
				for (i = 0; i < Y->cols; i++) {
					s(x->size, x->values, x->inc,
							&Y->values[(size_t)i],
							Y->ld, alpha);
				}
				break;
			}
			break;
		default:
			/* not reached */
			assert(0);
		}
		return 0;
	}
	X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X != NULL) {
		if (hasy) {
			Y = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE);
			luaL_argcheck(L, X->order == Y->order, 2,
					"order mismatch");
			luaL_argcheck(L, X->rows == Y->rows && X->cols
					== Y->cols, 2, "dimension mismatch");
			index++;
		} else {
			Y = X;
		}
		if (hasalpha) {
			alpha = luaL_optnumber(L, index, 1.0);
			index++;
		} else {
			alpha = 0.0;
		}
		/* invoke subprogram on matrix-matrix, one major line at
		 * a time */
		switch (X->order) {
		case CblasRowMajor:
			for (i = 0; i < X->rows; i++) {
				s(X->cols, &X->values[(size_t)i * X->ld], 1,
						&Y->values[(size_t)i * Y->ld],
						1, alpha);
			}
			break;
		case CblasColMajor:
			for (i = 0; i < X->cols; i++) {
				s(X->rows, &X->values[(size_t)i * X->ld], 1,
						&Y->values[(size_t)i * Y->ld],
						1, alpha);
			}
			break;
		}
		return 0;
	}
	return argerror(L, 1);
}
/* wraps the SWAP subprogram in the xyfunction signature; alpha is
 * unused */
static void _swap (int size, float *x, int incx, float *y, int incy,
		float alpha) {
	(void)alpha;
	cblas_sswap(size, x, incx, y, incy);
}
/* invokes the SWAP subprogram (y <-> x); two operands, no alpha */
static int swap (lua_State *L) {
	return xy(L, _swap, 1, 0);
}
/* wraps the COPY subprogram in the xyfunction signature; alpha is
 * unused */
static void _copy (int size, float *x, int incx, float *y, int incy,
		float alpha) {
	(void)alpha;
	cblas_scopy(size, x, incx, y, incy);
}
/* invokes the COPY subprogram (y <- x); two operands, no alpha */
static int copy (lua_State *L) {
	return xy(L, _copy, 1, 0);
}
/* wraps the AXPY subprogram in the xyfunction signature */
static void _axpy (int size, float *x, int incx, float *y, int incy,
		float alpha) {
	cblas_saxpy(size, alpha, x, incx, y, incy);
}
/* invokes the AXPY subprogram (y <- alpha x + y); two operands plus
 * optional alpha */
static int axpy (lua_State *L) {
	return xy(L, _axpy, 1, 1);
}
/* wraps the SCAL subprogram in the xyfunction signature; operates on x
 * only, y is unused */
static void _scal (int size, float *x, int incx, float *y, int incy,
		float alpha) {
	(void)y;
	(void)incy;
	cblas_sscal(size, alpha, x, incx);
}
/* invokes the SCAL subprogram (x <- alpha x); one operand plus
 * optional alpha */
static int scal (lua_State *L) {
	return xy(L, _scal, 0, 1);
}
/* set operation implementation: fills size strided elements of x with
 * alpha; y is unused */
static void _set (int size, float *x, int incx, float *y, int incy,
		float alpha) {
	int k;
	(void)y;
	(void)incy;
	#pragma omp parallel for private(k) schedule(auto) \
			if(size >= LUALINEAR_OMP_MINSIZE)
	for (k = 0; k < size; k++) {
		x[(size_t)k * incx] = alpha;
	}
}
/* performs a set operation (x <- alpha); one operand plus optional
 * alpha */
static int set (lua_State *L) {
	return xy(L, _set, 0, 1);
}
/* uniform RNG implementation: fills x with deviates in [0, 1) using
 * POSIX random(); NOTE(review): assumes random() spans [0, RAND_MAX] --
 * confirm on the target platform. Sequential by design (random() keeps
 * global state), so no OpenMP pragma here. */
static void _uniform (int size, float *x, int incx, float *y, int incy,
		float alpha) {
	int i;
	(void)y;
	(void)incy;
	(void)alpha;
	for (i = 0; i < size; i++) {
		*x = (float)random() * (1.0 / ((float)RAND_MAX + 1.0));
		x += incx;
	}
}
/* performs a uniform operation (x <- uniform); one operand, no alpha */
static int uniform (lua_State *L) {
	return xy(L, _uniform, 0, 0);
}
/* normal RNG implementation: fills x with standard normal deviates via
 * the Box-Muller transform, two per iteration plus an odd tail.
 * Fixes: the rejection guard was `u1 <= -DBL_MAX`, which is never true
 * for u1 in [0, 1], so u1 == 0 could reach logf(0) = -inf; it now
 * rejects non-positive u1. Also uses sqrtf consistently and portable
 * sinf/cosf instead of the GNU-only sincosf. */
static void _normal (int size, float *x, int incx, float *y, int incy,
		float alpha) {
	int i;
	float u1, u2, r, s, c, t;
	(void)y;
	(void)incy;
	(void)alpha;
	for (i = 0; i < size - 1; i += 2) {
		do {
			u1 = (float)random() * (1.0 / (float)RAND_MAX);
			u2 = (float)random() * (1.0 / (float)RAND_MAX);
		} while (u1 <= 0.0f);  /* logf(0) is -inf; retry */
		r = sqrtf(-2.0 * logf(u1));
		t = 2 * M_PI * u2;
		s = sinf(t);
		c = cosf(t);
		*x = r * c;
		x += incx;
		*x = r * s;
		x += incx;
	}
	if (i < size) {
		/* odd size: generate one final deviate */
		do {
			u1 = (float)random() * (1.0 / (float)RAND_MAX);
			u2 = (float)random() * (1.0 / (float)RAND_MAX);
		} while (u1 <= 0.0f);
		*x = sqrtf(-2.0 * logf(u1)) * cosf(2 * M_PI * u2);
		x += incx;
	}
}
/* performs a normal operation (x <- normal); one operand, no alpha */
static int normal (lua_State *L) {
	return xy(L, _normal, 0, 0);
}
/* inc operation implementation: adds alpha to each strided element of
 * x; y is unused */
static void _inc (int size, float *x, int incx, float *y, int incy,
		float alpha) {
	int k;
	(void)y;
	(void)incy;
	#pragma omp parallel for private(k) schedule(auto) \
			if(size >= LUALINEAR_OMP_MINSIZE)
	for (k = 0; k < size; k++) {
		x[(size_t)k * incx] = x[(size_t)k * incx] + alpha;
	}
}
/* performs an inc operation (x <- x + alpha); one operand plus
 * optional alpha */
static int inc (lua_State *L) {
	return xy(L, _inc, 0, 1);
}
/* element-wise multiplication implementation for alpha = 1:
 * y_i <- y_i * x_i, skipping the pow() of the general kernel */
static void _mul1 (int size, float *x, int incx, float *y, int incy,
		float alpha) {
	int k;
	(void)alpha;
	#pragma omp parallel for private(k) schedule(auto) \
			if(size >= LUALINEAR_OMP_MINSIZE)
	for (k = 0; k < size; k++) {
		y[(size_t)k * incy] = y[(size_t)k * incy]
				* x[(size_t)k * incx];
	}
}
/* element-wise multiplication implementation for alpha = -1:
 * y_i <- y_i / x_i (multiplication by x^-1) */
static void _mulm1 (int size, float *x, int incx, float *y, int incy,
		float alpha) {
	int k;
	(void)alpha;
	#pragma omp parallel for private(k) schedule(auto) \
			if(size >= LUALINEAR_OMP_MINSIZE)
	for (k = 0; k < size; k++) {
		y[(size_t)k * incy] = y[(size_t)k * incy]
				/ x[(size_t)k * incx];
	}
}
/* element-wise multiplication implementation, alpha = any */
static void _mul (int size, float *x, int incx, float *y, int incy,
float alpha) {
int i;
#pragma omp parallel for private(i) schedule(auto) \
if(size >= LUALINEAR_OMP_MINSIZE)
for (i = 0; i < size; i++) {
y[(size_t)i * incy] *= pow(x[(size_t)i * incx], alpha);
}
}
/* performs element-wise multiplication (y <- x^alpha .* y); dispatches
 * to specialized kernels for the common alpha = 1 (plain multiply) and
 * alpha = -1 (divide) cases, avoiding pow() */
static int mul (lua_State *L) {
	float alpha;
	alpha = luaL_optnumber(L, 3, 1.0);
	if (alpha == 1.0) {
		return xy(L, _mul1, 1, 1);
	}
	if (alpha == -1.0) {
		return xy(L, _mulm1, 1, 1);
	}
	return xy(L, _mul, 1, 1);
}
/* power raising operation implementation */
static void _pow (int size, float *x, int incx, float *y, int incy,
float alpha) {
int i;
(void)y;
(void)incy;
#pragma omp parallel for private(i) schedule(auto) \
if(size >= LUALINEAR_OMP_MINSIZE)
for (i = 0; i < size; i++) {
x[(size_t)i * incx] = pow(x[(size_t)i * incx], alpha);
}
}
/* performs element-wise power raising (x <- x^alpha); one operand plus
 * optional alpha */
static int powx (lua_State *L) {
	return xy(L, _pow, 0, 1);
}
/* apply function */
typedef float(*applyfunction)(float);
/* applies a scalar function in place to a number, a vector, or a
 * matrix; parallel = 0 disables the OpenMP clauses (needed when the
 * applied function is not thread-safe, e.g. a Lua callback) */
static int apply (lua_State *L, applyfunction apply, int parallel) {
	struct vector *x;
	struct matrix *X;
	int i, j;
	size_t base;
	/* plain numbers are mapped directly and returned */
	if (lua_type(L, 1) == LUA_TNUMBER) {
		lua_pushnumber(L, apply(lua_tonumber(L, 1)));
		return 1;
	}
	x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x != NULL) {
		#pragma omp parallel for private(i) schedule(auto) \
				if(parallel && x->size >= LUALINEAR_OMP_MINSIZE)
		for (i = 0; i < x->size; i++) {
			x->values[(size_t)i * x->inc] =
					apply(x->values[(size_t)i
					* x->inc]);
		}
		return 0;
	}
	X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X != NULL) {
		/* outer loop walks major lines; the inner (contiguous)
		 * loop is the parallelized one */
		switch (X->order) {
		case CblasRowMajor:
			for (i = 0; i < X->rows; i++) {
				base = (size_t)i * X->ld;
				#pragma omp parallel for private(j) \
						schedule(auto) \
						if(parallel && X->cols \
						>= LUALINEAR_OMP_MINSIZE)
				for (j = 0; j < X->cols; j++) {
					X->values[base + j] = apply(
							X->values[base
							+ j]);
				}
			}
			break;
		case CblasColMajor:
			for (i = 0; i < X->cols; i++) {
				base = (size_t)i * X->ld;
				#pragma omp parallel for private(j) \
						schedule(auto) \
						if(parallel && X->rows \
						>= LUALINEAR_OMP_MINSIZE)
				for (j = 0; j < X->rows; j++) {
					X->values[base + j] = apply(
							X->values[base
							+ j]);
				}
			}
			break;
		}
		return 0;
	}
	return luaL_argerror(L, 1, lua_pushfstring(L, "number, vector, or "
			"matrix expected, got %s", luaL_typename(L, 1)));
}
/* sign function implementation: -1 for negatives, 1 for positives;
 * everything else (including signed zero and NaN) is returned as-is */
static float _sign (float x) {
	if (x < 0) {
		return -1;
	}
	if (x > 0) {
		return 1;
	}
	return x;
}
/* sign function: applies _sign element-wise (parallelized) */
static int sign (lua_State *L) {
	return apply(L, _sign, 1);
}
/* abs function implementation. Fix: the original called the integer
 * abs(), which silently converts the float argument to int and back,
 * truncating the fractional part (e.g. abs(-2.5f) -> 2.0f); fabsf
 * keeps full float precision. */
static float _abs (float x) {
	return fabsf(x);
}
/* abs function: applies _abs element-wise (parallelized) */
static int absx (lua_State *L) {
	return apply(L, _abs, 1);
}
/* exp function: applies expf element-wise (parallelized) */
static int expx (lua_State *L) {
	return apply(L, expf, 1);
}
/* log function: applies logf element-wise (parallelized) */
static int logx (lua_State *L) {
	return apply(L, logf, 1);
}
/* logistic function implementation */
static float _logistic (float z) {
return 1.0 / (1.0 + expf(-z));
}
/* logistic function: applies _logistic element-wise (parallelized) */
static int logistic (lua_State *L) {
	return apply(L, _logistic, 1);
}
/* tanh function: applies tanhf element-wise (parallelized) */
static int tanhx (lua_State *L) {
	return apply(L, tanhf, 1);
}
/* softplus function implementation */
static float _softplus (float x) {
return logf(1 + expf(x));
}
/* softplus function: applies _softplus element-wise (parallelized) */
static int softplus (lua_State *L) {
	return apply(L, _softplus, 1);
}
/* rectifier function implementation: max(x, 0), i.e. ReLU */
static float _rectifier (float x) {
	if (x > 0.0) {
		return x;
	}
	return 0.0;
}
/* rectifier function: applies _rectifier element-wise (parallelized) */
static int rectifier (lua_State *L) {
	return apply(L, _rectifier, 1);
}
/* current Lua state, stashed per-thread so the C callback _apply can
 * reach it (the applyfunction signature has no user-data parameter) */
static __thread lua_State *TL;
/* apply function implementation: calls the Lua function kept at the
 * top of TL's stack with x and returns its numeric result; not
 * thread-safe, so apply() must be invoked with parallel = 0 */
static float _apply (float x) {
	float result;
	lua_pushvalue(TL, -1);
	lua_pushnumber(TL, x);
	lua_call(TL, 1, 1);
	result = lua_tonumber(TL, -1);
	lua_pop(TL, 1);
	return result;
}
/* apply function: maps a Lua function (argument 2) over a number,
 * vector, or matrix; runs sequentially because the Lua state is not
 * thread-safe */
static int applyx (lua_State *L) {
	luaL_checktype(L, 2, LUA_TFUNCTION);
	lua_settop(L, 2);
	TL = L;
	return apply(L, _apply, 0);
}
/* invokes the GEMV subprogram (y <- alpha A x + beta y);
 * alpha defaults to 1.0, beta to 0.0, transpose flag in argument 6 */
static int gemv (lua_State *L) {
	struct matrix *A;
	struct vector *x, *y;
	float alpha, beta;
	CBLAS_TRANSPOSE ta;
	int m, n;
	/* check and process arguments */
	A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	x = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE);
	y = luaL_checkudata(L, 3, LUALINEAR_VECTOR_METATABLE);
	alpha = luaL_optnumber(L, 4, 1.0);
	beta = luaL_optnumber(L, 5, 0.0);
	ta = checktranspose(L, 6);
	/* effective dimensions after the optional transpose */
	m = ta == CblasNoTrans ? A->rows : A->cols;
	n = ta == CblasNoTrans ? A->cols : A->rows;
	luaL_argcheck(L, x->size == n, 2, "dimension mismatch");
	luaL_argcheck(L, y->size == m, 3, "dimension mismatch");
	/* invoke subprogram */
	cblas_sgemv(A->order, ta, A->rows, A->cols, alpha, A->values, A->ld,
			x->values, x->inc, beta, y->values, y->inc);
	return 0;
}
/* invokes the GER subprogram (A <- alpha x y' + A), a rank-1 update;
 * alpha defaults to 1.0 */
static int ger (lua_State *L) {
	struct vector *x, *y;
	struct matrix *A;
	float alpha;
	/* check and process arguments */
	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE);
	A = luaL_checkudata(L, 3, LUALINEAR_MATRIX_METATABLE);
	alpha = luaL_optnumber(L, 4, 1.0);
	luaL_argcheck(L, x->size == A->rows, 1, "dimension mismatch");
	luaL_argcheck(L, y->size == A->cols, 2, "dimension mismatch");
	/* invoke subprogram */
	cblas_sger(A->order, A->rows, A->cols, alpha, x->values, x->inc,
			y->values, y->inc, A->values, A->ld);
	return 0;
}
/* invokes the GEMM subprogram (C <- alpha A B + beta C); all three
 * matrices must share the same storage order; transpose flags for A
 * and B in arguments 6 and 7. NOTE(review): C's dimensions are not
 * checked against m and n here -- confirm that is intentional. */
static int gemm (lua_State *L) {
	struct matrix *A, *B, *C;
	float alpha, beta;
	CBLAS_TRANSPOSE ta, tb;
	int m, n, ka, kb;
	/* check and process arguments */
	A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, B->order == A->order, 2, "order mismatch");
	C = luaL_checkudata(L, 3, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, C->order == A->order, 3, "order mismatch");
	alpha = luaL_optnumber(L, 4, 1.0);
	beta = luaL_optnumber(L, 5, 0.0);
	ta = checktranspose(L, 6);
	tb = checktranspose(L, 7);
	/* effective dimensions after the optional transposes */
	m = ta == CblasNoTrans ? A->rows : A->cols;
	n = tb == CblasNoTrans ? B->cols : B->rows;
	ka = ta == CblasNoTrans ? A->cols : A->rows;
	kb = tb == CblasNoTrans ? B->rows : B->cols;
	luaL_argcheck(L, ka == kb, 2, "dimension mismatch");
	/* invoke subprogram */
	cblas_sgemm(A->order, ta, tb, m, n, ka, alpha, A->values, A->ld,
			B->values, B->ld, beta, C->values, C->ld);
	return 0;
}
/* invokes the GESV subprogram: solves A X = B for a square A,
 * overwriting B with the solution and A with its LU factors; returns
 * LAPACK's result code. Fix: ipiv is now declared lapack_int * to
 * match its allocation (sizeof(lapack_int)) and the LAPACKE interface,
 * which was a type mismatch whenever lapack_int is not int. */
static int gesv (lua_State *L) {
	struct matrix *A, *B;
	lapack_int *ipiv;
	int result;
	/* check and process arguments */
	A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, A->rows == A->cols, 1, "not square");
	B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, B->order == A->order, 2, "order mismatch");
	luaL_argcheck(L, B->rows == A->rows, 2, "dimension mismatch");
	/* allocate pivot indices and invoke subprogram */
	ipiv = calloc(A->rows, sizeof(lapack_int));
	if (ipiv == NULL) {
		return luaL_error(L, "cannot allocate indexes");
	}
	result = LAPACKE_sgesv(A->order, A->rows, B->cols, A->values, A->ld,
			ipiv, B->values, B->ld);
	free(ipiv);
	lua_pushinteger(L, result);
	return 1;
}
/* invokes the GELS subprogram: least-squares solve of an over- or
 * under-determined system A X = B; returns LAPACK's result code */
static int gels (lua_State *L) {
	struct matrix *A, *B;
	char ta;
	/* check and process arguments */
	A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, B->order == A->order, 2, "order mismatch");
	ta = lapacktranspose(checktranspose(L, 3));
	luaL_argcheck(L, B->rows == (ta == 'N' ? A->rows : A->cols), 2,
			"dimension mismatch");
	/* invoke subprogram */
	lua_pushinteger(L, LAPACKE_sgels(A->order, ta, A->rows, A->cols,
			B->cols, A->values, A->ld, B->values, B->ld));
	return 1;
}
/* calculates the inverse of a square matrix in place via LU
 * factorization (getrf) followed by getri; returns LAPACK's result
 * code (non-zero means the factorization failed, e.g. singular input).
 * Fix: ipiv is now declared lapack_int * to match its allocation and
 * the LAPACKE interface. */
static int inv (lua_State *L) {
	struct matrix *A;
	lapack_int *ipiv;
	int result;
	/* check and process arguments */
	A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, A->rows == A->cols, 1, "not square");
	/* allocate pivot indices and invoke subprograms */
	ipiv = calloc(A->rows, sizeof(lapack_int));
	if (ipiv == NULL) {
		return luaL_error(L, "cannot allocate indexes");
	}
	result = LAPACKE_sgetrf(A->order, A->rows, A->cols, A->values, A->ld,
			ipiv);
	if (result != 0) {
		/* factorization failed; report without calling getri */
		free(ipiv);
		lua_pushinteger(L, result);
		return 1;
	}
	result = LAPACKE_sgetri(A->order, A->rows, A->values, A->ld, ipiv);
	free(ipiv);
	lua_pushinteger(L, result);
	return 1;
}
/* calculates the determinant of a square matrix via LU factorization
 * of a packed copy (the input is left untouched); returns 0.0 when the
 * factorization reports a singular matrix. Fix: ipiv is now declared
 * lapack_int * to match its allocation and the LAPACKE interface. */
static int det (lua_State *L) {
	struct matrix *A;
	float *copy, *d, *s, det;
	lapack_int *ipiv;
	int n, result, neg, i;
	/* check and process arguments */
	A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, A->rows == A->cols, 1, "not square");
	n = A->rows;
	/* copy matrix into packed storage (leading dimension n), since
	 * getrf overwrites its input */
	copy = calloc((size_t)n * n, sizeof(float));
	if (copy == NULL) {
		return luaL_error(L, "cannot allocate values");
	}
	d = copy;
	s = A->values;
	for (i = 0; i < n; i++) {
		memcpy(d, s, (size_t)n * sizeof(float));
		d += n;
		s += A->ld;
	}
	/* invoke subprograms */
	ipiv = calloc(n, sizeof(lapack_int));
	if (ipiv == NULL) {
		free(copy);
		return luaL_error(L, "cannot allocate indexes");
	}
	result = LAPACKE_sgetrf(A->order, n, n, copy, n, ipiv);
	if (result != 0) {
		/* singular: determinant is zero */
		free(copy);
		free(ipiv);
		lua_pushnumber(L, 0.0);
		return 1;
	}
	/* the determinant is the product of U's diagonal, negated once
	 * per row interchange recorded in ipiv */
	det = 1.0;
	neg = 0;
	for (i = 0; i < n; i++) {
		det *= copy[(size_t)i * n + i];
		if (ipiv[i] != i + 1) {
			neg = !neg;
		}
	}
	free(copy);
	free(ipiv);
	lua_pushnumber(L, neg ? -det : det);
	return 1;
}
/*
 * Exported functions.
 */

/* module entry point: registers the module functions and installs the
 * vector and matrix metatables */
int luaopen_linear (lua_State *L) {
	static const luaL_Reg FUNCTIONS[] = {
		{ "vector", vector },
		{ "matrix", matrix },
		{ "type", type },
		{ "size", size },
		{ "tvector", tvector },
		{ "sub", sub },
		{ "unwind", unwind },
		{ "reshape", reshape },
		{ "totable", totable },
		{ "tolinear", tolinear },
		{ "dot", dot },
		{ "nrm2", nrm2 },
		{ "asum", asum },
		{ "iamax", iamax },
		{ "sum", sum },
		{ "swap", swap },
		{ "copy", copy },
		{ "axpy", axpy },
		{ "scal", scal },
		{ "set", set },
		{ "uniform", uniform },
		{ "normal", normal },
		{ "inc", inc },
		{ "mul", mul },
		{ "pow", powx },
		{ "sign", sign },
		{ "abs", absx },
		{ "exp", expx },
		{ "log", logx },
		{ "logistic", logistic },
		{ "tanh", tanhx },
		{ "softplus", softplus },
		{ "rectifier", rectifier },
		{ "apply", applyx },
		{ "gemv", gemv },
		{ "ger", ger },
		{ "gemm", gemm },
		{ "gesv", gesv },
		{ "gels", gels },
		{ "inv", inv },
		{ "det", det },
		{ NULL, NULL }
	};
	/* register functions; pre-5.2 Lua expects the module name as the
	 * first argument */
	#if LUA_VERSION_NUM >= 502
	luaL_newlib(L, FUNCTIONS);
	#else
	luaL_register(L, luaL_checkstring(L, 1), FUNCTIONS);
	#endif
	/* vector metatable */
	luaL_newmetatable(L, LUALINEAR_VECTOR_METATABLE);
	lua_pushcfunction(L, vector_len);
	lua_setfield(L, -2, "__len");
	lua_pushcfunction(L, vector_index);
	lua_setfield(L, -2, "__index");
	lua_pushcfunction(L, vector_newindex);
	lua_setfield(L, -2, "__newindex");
	lua_pushcfunction(L, vector_ipairs);
	lua_setfield(L, -2, "__ipairs");
	lua_pushcfunction(L, vector_tostring);
	lua_setfield(L, -2, "__tostring");
	lua_pushcfunction(L, vector_free);
	lua_setfield(L, -2, "__gc");
	lua_pop(L, 1);
	/* matrix metatable (no __newindex: matrix elements are written
	 * through vector views) */
	luaL_newmetatable(L, LUALINEAR_MATRIX_METATABLE);
	lua_pushcfunction(L, matrix_len);
	lua_setfield(L, -2, "__len");
	lua_pushcfunction(L, matrix_index);
	lua_setfield(L, -2, "__index");
	lua_pushcfunction(L, matrix_ipairs);
	lua_setfield(L, -2, "__ipairs");
	lua_pushcfunction(L, matrix_tostring);
	lua_setfield(L, -2, "__tostring");
	lua_pushcfunction(L, matrix_free);
	lua_setfield(L, -2, "__gc");
	lua_pop(L, 1);
	return 1;
}
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
#if EIGEN_HAS_CXX11_ATOMIC
#include <atomic>
#endif
namespace Eigen {
namespace internal {
/** \internal Stores or retrieves the user-set maximum thread count.
  * SetAction: *v becomes the new maximum. GetAction: *v receives the
  * current maximum -- OpenMP's omp_get_max_threads() while unset (or
  * non-positive), and 1 when built without OpenMP. */
inline void manage_multi_threading(Action action, int* v)
{
  /* -1 means "not set by the user yet" */
  static int m_maxThreads = -1;
  EIGEN_UNUSED_VARIABLE(m_maxThreads)
  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
#ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
#else
    *v = 1;
#endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}
}
/** Must be called first when calling Eigen from multiple threads:
  * touches the lazily-initialized thread-count and cache-size statics
  * so their first initialization does not race. */
inline void initParallel()
{
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}
/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}
/** Sets the max number of threads reserved for Eigen; a non-positive
  * value restores the OpenMP default.
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}
namespace internal {
/** \internal Per-thread bookkeeping for the parallel GEMM kernel:
  * sync/users coordinate the packing hand-off between threads,
  * lhs_start/lhs_length describe this thread's slice of the lhs. */
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  // volatile is not enough on all architectures (see bug 1572)
  // to guarantee that when thread A says to thread B that it is
  // done with packing a block, then all writes have been really
  // carried out... C++11 memory model+atomic guarantees this.
#if EIGEN_HAS_CXX11_ATOMIC
  std::atomic<Index> sync;
  std::atomic<int> users;
#else
  Index volatile sync;
  int volatile users;
#endif

  Index lhs_start;
  Index lhs_length;
};
// Run the GEMM functor `func` over the full [0,rows) x [0,cols) product,
// splitting the columns (or rows, when `transpose` is set) across OpenMP
// threads when the problem is large enough to amortize the overhead.
// Falls back to a single sequential call when OpenMP is unavailable,
// EIGEN_USE_BLAS is defined, or no reliable synchronization primitive
// exists (pre-C++11 on non-x86; bug 1572).
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
// TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types
// Without C++11, we have to disable GEMM's parallelization on
// non x86 architectures because there volatile is not enough for our purpose.
// See bug 1572.
#if (! defined(EIGEN_HAS_OPENMP)) || defined(EIGEN_USE_BLAS) || ((!EIGEN_HAS_CXX11_ATOMIC) && !(EIGEN_ARCH_i386_OR_x86_64))
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
// parallelizer mechanism has to be redesigned anyway.
EIGEN_UNUSED_VARIABLE(depth);
EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols);
#else
// Dynamically check whether we should enable or disable OpenMP.
// The conditions are:
// - the max number of threads we can create is greater than 1
// - we are not already in a parallel code
// - the sizes are large enough
// compute the maximal number of threads from the size of the product:
// This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
Index size = transpose ? rows : cols;
Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);
// compute the maximal number of threads from the total amount of work:
double work = static_cast<double>(rows) * static_cast<double>(cols) *
static_cast<double>(depth);
double kMinTaskSize = 50000; // FIXME improve this heuristic.
pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, static_cast<Index>( work / kMinTaskSize ) ));
// compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), pb_max_threads);
// if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
// then abort multi-threading
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
return func(0,rows, 0,cols);
Eigen::initParallel();
func.initParallelSession(threads);
if(transpose)
std::swap(rows,cols);
// One GemmParallelInfo slot per worker; stack-allocated when small.
ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);
#pragma omp parallel num_threads(threads)
{
Index i = omp_get_thread_num();
// Note that the actual number of threads might be lower than the number of request ones.
Index actual_threads = omp_get_num_threads();
// Column blocks are rounded down to a multiple of 4; row blocks to a
// multiple of the kernel's mr. The last thread absorbs the remainder.
Index blockCols = (cols / actual_threads) & ~Index(0x3);
Index blockRows = (rows / actual_threads);
blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
Index r0 = i*blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
info[i].lhs_start = r0;
info[i].lhs_length = actualBlockRows;
if(transpose) func(c0, actualBlockCols, 0, rows, info);
else func(0, rows, c0, actualBlockCols, info);
}
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
expected_output.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
//---------------------------------------------------------------------
// program FT
//---------------------------------------------------------------------
//----------
// Class S:
//----------
//----------
// Class W:
//----------
//----------
// Class A:
//----------
//----------
// Class B:
//----------
//----------
// Class C:
//----------
//----------
// Class D:
//----------
//----------
// Class E:
//----------
// Complex value represented as a (real, imag) pair of doubles.
struct anon_NAS_FT_c_116 {
    double real;
    double imag;
};
typedef struct anon_NAS_FT_c_116 dcomplex;
/*common /timerscomm/*/
// Complex division z1 / z2 via the textbook formula
// (a+bi)/(c+di) = ((ac+bd) + (bc-ad)i) / (c^2 + d^2).
// No guard against a zero divisor, matching the reference code.
dcomplex dcmplx_div(dcomplex z1, dcomplex z2) {
    double denom = z2.real * z2.real + z2.imag * z2.imag;
    dcomplex quotient;
    quotient.real = (z1.real * z2.real + z1.imag * z2.imag) / denom;
    quotient.imag = (z1.imag * z2.real - z1.real * z2.imag) / denom;
    return quotient;
}
/*common /blockinfo/*/
// Column-block width used by the Swarztrauber FFT kernel (set inside fftXYZ).
int fftblock;
/*common /workarr/*/
// Shared scratch buffers for the FFT passes (global, not thread-local).
dcomplex plane[4224];
dcomplex scr[128][33];
// for checksum data
/*common /sumcomm/*/
// sums[1..niter] holds per-iteration checksums; index 0 is unused.
dcomplex sums[7];
/*common /mainarrays/*/
// Main 128x128x32 grids; the innermost dimension is padded by one element
// (see the "d1 + 1" prototypes below).
double twiddle[32][128][129];
dcomplex xnt[32][128][129];
dcomplex y[32][128][129];
// Forward declarations for the benchmark routines defined below.
void appft(int niter, double *total_time, int *verified);
void CompExp(int n, dcomplex exponent[n]);
int ilog2(int n);
void CalculateChecksum(dcomplex *csum, int iterN, int d1, int d2, int d3, dcomplex u[d3][d2][d1 + 1]);
void compute_initial_conditions(int d1, int d2, int d3, dcomplex u0[d3][d2][d1 + 1]);
void evolve(int nx, int ny, int nz, dcomplex x[nz][ny][nx + 1], dcomplex y[nz][ny][nx + 1], double twiddle[nz][ny][nx + 1]);
void fftXYZ(int sign, int n1, int n2, int n3, dcomplex x[n3][n2][n1 + 1], dcomplex xout[(n1 + 1) * n2 * n3], dcomplex exp1[n1], dcomplex exp2[n2], dcomplex exp3[n3]);
void verify(int n1, int n2, int n3, int nt, dcomplex cksum[nt + 1], int *verified);
double randlc(double *x, double a);
void vranlc(int n, double *x, double a, double y[]);
char getclass();
void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified);
// Wall-clock timer state and API (up to 64 independent timer channels).
double start[64];
double elapsed[64];
double elapsed_time();
void timer_clear(int n);
void timer_start(int n);
void timer_stop(int n);
double timer_read(int n);
void wtime(double *t);
// Entry point: run the FT benchmark at the compiled 128x128x32 size,
// derive Mop/s from the measured time, and report the results.
// Exit status is 0 on successful verification, 1 otherwise.
int main(int argc, char *argv[]) {
    int niter = 6;
    printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - FT Benchmark\n\n");
    printf(" Size             : %4dx%4dx%4d\n", 128, 128, 32);
    printf(" Iterations       : %10d\n", niter);
    printf("\n");

    char Class = getclass();
    double total_time;
    int verified;
    appft(niter, &total_time, &verified);

    // Standard NPB FT operation-count formula for 524288 (= 128*128*32) points.
    double mflops = 0.0;
    if (total_time != 0.0) {
        mflops = 1.0e-6 * (double) 524288 * (14.8157 + 7.19641 * log((double) 524288) + (5.23518 + 7.21113 * log((double) 524288)) * niter) / total_time;
    }
    print_results("FT", Class, 128, 128, 32, niter, total_time, mflops, " floating point", verified);
    return verified ? 0 : 1;
}
// Map the compiled problem size (128x128x32, 6 iterations) onto the
// corresponding NPB class letter; returns 'U' (unknown) when no class
// matches. The grid dimensions are compile-time constants, so for this
// build the function always yields class 'W'.
char getclass() {
    static const struct {
        int n1, n2, n3, nt;
        char cls;
    } classes[] = {
        {   64,   64,   64,  6, 'S' },
        {  128,  128,   32,  6, 'W' },
        {  256,  256,  128,  6, 'A' },
        {  512,  256,  256, 20, 'B' },
        {  512,  512,  512, 20, 'C' },
        { 2048, 1024, 1024, 25, 'D' },
    };
    for (int i = 0; i < (int)(sizeof classes / sizeof classes[0]); i++) {
        if (128 == classes[i].n1 && 128 == classes[i].n2 &&
            32 == classes[i].n3 && 6 == classes[i].nt) {
            return classes[i].cls;
        }
    }
    return 'U';
}
// Core benchmark driver: build the twiddle-factor table, generate the
// initial conditions, run `niter` evolve/FFT/checksum iterations, and
// verify the checksums. Timer 2 covers untimed setup; timer 1 covers
// the benchmarked region and is returned via *total_time.
void appft(int niter, double *total_time, int *verified) {
int i, j, k, kt, n12, n22, n32, ii, jj, kk, ii2, ik2;
double ap;
dcomplex exp1[128];
dcomplex exp2[128];
dcomplex exp3[32];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(i = 1; i <= 15; i++) {
timer_clear(i);
}
timer_start(2);
// Warm-up pass (untimed): initialize data and run one forward FFT.
compute_initial_conditions(128, 128, 32, xnt);
CompExp(128, exp1);
CompExp(128, exp2);
CompExp(32, exp3);
fftXYZ(1, 128, 128, 32, xnt, (dcomplex *) y, exp1, exp2, exp3);
timer_stop(2);
timer_start(1);
n12 = 128 / 2;
n22 = 128 / 2;
n32 = 32 / 2;
// ap = -4 * alpha * pi^2 with alpha = 1e-6 (heat-equation decay rate).
ap = -4.0 * 1.0e-6 * (3.141592653589793238 * 3.141592653589793238);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(i = 0; i < 32; i++) {
// ii/jj/kk re-center the indices so frequencies above N/2 map to
// their negative counterparts before squaring.
ii = i - (i / n32) * 32;
ii2 = ii * ii;
#pragma omp parallel for default(shared) private(k, j, kk, ik2, jj) firstprivate(n22, ii2, n12, ap, i)
for(k = 0; k < 128; k++) {
kk = k - (k / n22) * 128;
ik2 = ii2 + kk * kk;
// #pragma omp parallel for default(shared) private(j, jj) firstprivate(n12, ik2, ap, i, k)
for(j = 0; j < 128; j++) {
jj = j - (j / n12) * 128;
twiddle[i][k][j] = exp(ap * (double) (jj * jj + ik2));
}
}
}
// Timed region proper: regenerate initial conditions and forward-FFT them.
compute_initial_conditions(128, 128, 32, xnt);
fftXYZ(1, 128, 128, 32, xnt, (dcomplex *) y, exp1, exp2, exp3);
/*************** Clava msgError **************
Variables Access as passed arguments Can not be traced inside of function calls :
fftXYZ#223{fftXYZ(-1, 128, 128, 32, xnt, (dcomplex *) xnt, exp1, exp2, exp3)}
CalculateChecksum#224{CalculateChecksum(&sums[kt], kt, 128, 128, 32, xnt)}
****************************************/
for(kt = 1; kt <= niter; kt++) {
evolve(128, 128, 32, xnt, y, twiddle);
fftXYZ(-1, 128, 128, 32, xnt, (dcomplex *) xnt, exp1, exp2, exp3);
CalculateChecksum(&sums[kt], kt, 128, 128, 32, xnt);
}
// Verification test.
verify(128, 128, 32, niter, sums, verified);
timer_stop(1);
*total_time = timer_read(1);
}
//---------------------------------------------------------------------
// compute the roots-of-unity array that will be used for subsequent FFTs.
//---------------------------------------------------------------------
// exponent[0].real stores log2(n) as a double; entries 1..n-1 hold the
// cos/sin twiddle factors laid out level by level (ln values per level,
// starting at offset ku-1) for the Stockham FFT in Swarztrauber().
void CompExp(int n, dcomplex exponent[n]) {
int m, nu, ku, i, j, ln;
double t, ti;
double const pi = 3.141592653589793238;
// nu is assigned but never read (kept from the reference code).
nu = n;
m = ilog2(n);
exponent[0] = (dcomplex){m, 0.0};
ku = 2;
ln = 1;
/*************** Clava msgError **************
Variable ln could not be categorized into any OpenMP Variable Scopeuse : RW
Variable ku could not be categorized into any OpenMP Variable Scopeuse : RW
****************************************/
for(j = 1; j <= m; j++) {
// The angular step halves each level: t = pi / ln.
t = pi / ln;
#pragma omp parallel for default(shared) private(i, ti) firstprivate(ln, t, ku)
for(i = 0; i <= ln - 1; i++) {
ti = i * t;
exponent[i + ku - 1] = (dcomplex){cos(ti), sin(ti)};
}
ku = ku + ln;
ln = 2 * ln;
}
}
// Smallest k such that 2^k >= n, i.e. ceil(log2(n)) for n >= 1.
// (For the power-of-two sizes used by this benchmark this is exact log2.)
int ilog2(int n) {
    if (n == 1) {
        return 0;
    }
    int lg = 1;
    for (int p = 2; p < n; p *= 2) {
        lg++;
    }
    return lg;
}
//---------------------------------------------------------------------
// compute a^exponent mod 2^46
//---------------------------------------------------------------------
// Square-and-multiply in the randlc LCG arithmetic: q accumulates the
// repeated squarings of a, r accumulates the product for odd bits.
// randlc(&p, q) updates p to p*q mod 2^46 as a side effect; its return
// value (the normalized seed) is deliberately discarded into `dummy`.
double ipow46(double a, int exponent) {
double result, dummy, q, r;
int n, n2;
//---------------------------------------------------------------------
// Use
// a^n = a^(n/2)*a^(n/2) if n even else
// a^n = a*a^(n-1) if n odd
//---------------------------------------------------------------------
result = 1;
if(exponent == 0) return result;
q = a;
r = 1;
n = exponent;
while(n > 1) {
n2 = n / 2;
if(n2 * 2 == n) {
dummy = randlc(&q, q);
n = n2;
}
else {
dummy = randlc(&r, q);
n = n - 1;
}
}
// Final multiply folds the last remaining factor of q into r.
dummy = randlc(&r, q);
result = r;
return result;
}
// Sample 1024 elements of u along a fixed pseudo-random stride pattern
// (i mod d1, 3i mod d2, 5i mod d3), average the complex sum over the
// grid volume, print it, and store it in *csum.
void CalculateChecksum(dcomplex *csum, int iterN, int d1, int d2, int d3, dcomplex u[d3][d2][d1 + 1]) {
    double sum_re = 0.0;
    double sum_im = 0.0;
    for (int idx = 1; idx <= 1024; idx++) {
        int ii = idx % d1;
        int ji = (3 * idx) % d2;
        int ki = (5 * idx) % d3;
        sum_re = sum_re + u[ki][ji][ii].real;
        sum_im = sum_im + u[ki][ji][ii].imag;
    }
    double volume = (double)(d1 * d2 * d3);
    dcomplex mean = (dcomplex){sum_re / volume, sum_im / volume};
    printf(" T =%5d     Checksum =%22.12E%22.12E\n", iterN, mean.real, mean.imag);
    *csum = mean;
}
// Fill u0 with uniform pseudo-random complex values from the NPB LCG.
// Each z-plane k gets its own starting seed RanStarts[k], obtained by
// jumping the generator forward 2*d1*d2 values per plane, so planes are
// independent of the order in which they are filled.
void compute_initial_conditions(int d1, int d2, int d3, dcomplex u0[d3][d2][d1 + 1]) {
dcomplex tmp[128];
double x0, start, an, dummy;
double RanStarts[128];
int i, j, k;
double const seed = 314159265.0;
double const a = 1220703125.0;
start = seed;
//---------------------------------------------------------------------
// Jump to the starting element for our first plane.
//---------------------------------------------------------------------
// ipow46(a, 0) == 1, so this randlc call only warms up `start`.
an = ipow46(a, 0);
dummy = randlc(&start, an);
// Jump distance between consecutive planes: 2*d1*d2 random values
// (each dcomplex consumes two).
an = ipow46(a, 2 * d1 * d2);
//---------------------------------------------------------------------
// Go through by z planes filling in one square at a time.
//---------------------------------------------------------------------
RanStarts[0] = start;
/*************** Clava msgError **************
Variable start could not be categorized into any OpenMP Variable Scopeuse : RWR
****************************************/
for(k = 1; k < d3; k++) {
dummy = randlc(&start, an);
RanStarts[k] = start;
}
/*************** Clava msgError **************
Variables Access as passed arguments Can not be traced inside of function calls :
vranlc#380{vranlc(2 * d1, &x0, a, (double *) tmp)}
****************************************/
for(k = 0; k < d3; k++) {
x0 = RanStarts[k];
/*************** Clava msgError **************
Variables Access as passed arguments Can not be traced inside of function calls :
vranlc#380{vranlc(2 * d1, &x0, a, (double *) tmp)}
****************************************/
for(j = 0; j < d2; j++) {
// Generate 2*d1 doubles (= d1 complex values) for this row.
vranlc(2 * d1, &x0, a, (double *) tmp);
#pragma omp parallel for default(shared) private(i) firstprivate(d1, k, j, tmp)
for(i = 0; i < d1; i++) {
u0[k][j][i] = tmp[i];
}
}
}
}
// Advance the spectral solution one time step: scale each element of y
// by its (real-valued) twiddle factor, then mirror the result into x so
// the next inverse FFT operates on the evolved field.
void evolve(int nx, int ny, int nz, dcomplex x[nz][ny][nx + 1], dcomplex y[nz][ny][nx + 1], double twiddle[nz][ny][nx + 1]) {
    for (int iz = 0; iz < nz; iz++) {
        for (int iy = 0; iy < ny; iy++) {
            for (int ix = 0; ix < nx; ix++) {
                double w = twiddle[iz][iy][ix];
                y[iz][iy][ix] = (dcomplex){y[iz][iy][ix].real * w, y[iz][iy][ix].imag * w};
                x[iz][iy][ix] = y[iz][iy][ix];
            }
        }
    }
}
//---------------------------------------------------------------------
// Computes NY N-point complex-to-complex FFTs of X using an algorithm due
// to Swarztrauber. X is both the input and the output array, while Y is a
// scratch array. It is assumed that N = 2^M. Before calling
// Swarztrauber to
// perform FFTs
//---------------------------------------------------------------------
// Stockham FFT over `vlen` interleaved sequences of length n (= 2^m).
// `is` >= 1 selects the forward transform; otherwise the exponent is
// conjugated for the inverse. Data ping-pongs between x and the global
// scratch `scr`: each outer iteration performs TWO butterfly levels
// (x -> scr, then scr -> x), which is why l advances by 2; when l == m
// (odd m) only the copy-back remains.
void Swarztrauber(int is, int m, int vlen, int n, int xd1, void *ox, dcomplex exponent[n]) {
dcomplex (*x)[xd1] = (dcomplex (*)[xd1]) ox;
int i, j, l;
dcomplex u1, x11, x21;
int k, n1, li, lj, lk, ku, i11, i12, i21, i22;
//---------------------------------------------------------------------
// Perform one variant of the Stockham FFT.
//---------------------------------------------------------------------
n1 = n / 2;
lj = 1;
li = 1 << m;
/*************** Clava msgError **************
loop-step expression is not in canonical form: detected step operation is add_assign, expected one of assign, post_inc, pre_inc, pre_dec, post_dec, add, sub
****************************************/
for(l = 1; l <= m; l += 2) {
lk = lj;
lj = 2 * lk;
li = li / 2;
ku = li;
/*************** Clava msgError **************
NO subscript for Array Access (exponent[ku + i]).imag
****************************************/
for(i = 0; i <= li - 1; i++) {
// Butterfly group i: reads rows i11/i12 of x, writes rows i21/i22 of scr.
i11 = i * lk;
i12 = i11 + n1;
i21 = i * lj;
i22 = i21 + lk;
if(is >= 1) {
u1 = exponent[ku + i];
}
else {
// Inverse transform uses the conjugated twiddle factor.
u1 = (dcomplex){(exponent[ku + i]).real, -1.0 * (exponent[ku + i]).imag};
}
/*************** Clava msgError **************
unsolved dependency for arrayAccess scr use : W
****************************************/
for(k = 0; k <= lk - 1; k++) {
#pragma omp parallel for default(shared) private(j, x11, x21) firstprivate(vlen, i11, k, i12, i21, u1, i22, x)
for(j = 0; j < vlen; j++) {
// scr[i21+k] = x11 + x21;  scr[i22+k] = u1 * (x11 - x21).
x11 = x[i11 + k][j];
x21 = x[i12 + k][j];
scr[i21 + k][j] = (dcomplex){(x11).real + (x21).real, (x11).imag + (x21).imag};
scr[i22 + k][j] = (dcomplex){((u1).real * ((dcomplex){(x11).real - (x21).real, (x11).imag - (x21).imag}).real) - ((u1).imag * ((dcomplex){(x11).real - (x21).real, (x11).imag - (x21).imag}).imag), ((u1).real * ((dcomplex){(x11).real - (x21).real, (x11).imag - (x21).imag}).imag) + ((u1).imag * ((dcomplex){(x11).real - (x21).real, (x11).imag - (x21).imag}).real)};
}
}
}
if(l == m) {
// Odd number of levels: the result currently lives in scr, copy it back.
#pragma omp parallel for default(shared) private(k, j) firstprivate(n, vlen, scr)
for(k = 0; k < n; k++) {
// #pragma omp parallel for default(shared) private(j) firstprivate(vlen, k, scr)
for(j = 0; j < vlen; j++) {
x[k][j] = scr[k][j];
}
}
}
else {
// Second butterfly level of this pair: scr -> x.
lk = lj;
lj = 2 * lk;
li = li / 2;
ku = li;
/*************** Clava msgError **************
NO subscript for Array Access (exponent[ku + i]).imag
****************************************/
for(i = 0; i <= li - 1; i++) {
i11 = i * lk;
i12 = i11 + n1;
i21 = i * lj;
i22 = i21 + lk;
if(is >= 1) {
u1 = exponent[ku + i];
}
else {
u1 = (dcomplex){(exponent[ku + i]).real, -1.0 * (exponent[ku + i]).imag};
}
/*************** Clava msgError **************
unsolved dependency for arrayAccess x use : W
****************************************/
for(k = 0; k <= lk - 1; k++) {
#pragma omp parallel for default(shared) private(j, x11, x21) firstprivate(vlen, i11, k, i12, i21, u1, i22, scr)
for(j = 0; j < vlen; j++) {
x11 = scr[i11 + k][j];
x21 = scr[i12 + k][j];
x[i21 + k][j] = (dcomplex){(x11).real + (x21).real, (x11).imag + (x21).imag};
x[i22 + k][j] = (dcomplex){((u1).real * ((dcomplex){(x11).real - (x21).real, (x11).imag - (x21).imag}).real) - ((u1).imag * ((dcomplex){(x11).real - (x21).real, (x11).imag - (x21).imag}).imag), ((u1).real * ((dcomplex){(x11).real - (x21).real, (x11).imag - (x21).imag}).imag) + ((u1).imag * ((dcomplex){(x11).real - (x21).real, (x11).imag - (x21).imag}).real)};
}
}
}
}
}
}
// 3-D FFT of x as three passes of 1-D FFTs (over n1, then n2, then n3),
// blocked into strips of `fftblock` rows so the working set fits the
// `plane` scratch buffer. The x-pass and y-pass transform x in place;
// the z-pass writes the transposed result into the flat array xout.
// `sign` selects forward (1) or inverse (-1) transforms.
void fftXYZ(int sign, int n1, int n2, int n3, dcomplex x[n3][n2][n1 + 1], dcomplex xout[(n1 + 1) * n2 * n3], dcomplex exp1[n1], dcomplex exp2[n2], dcomplex exp3[n3]) {
int i, j, k, log;
int bls, ble;
int len;
int blkp;
// Block width chosen so a strip of n1-point lines fits in 8192 elements,
// capped at 32; blkp adds one column of padding.
fftblock = 8192 / n1;
if(fftblock >= 32) fftblock = 32;
blkp = fftblock + 1;
log = ilog2(n1);
/*************** Clava msgError **************
Variables Access as passed arguments Can not be traced inside of function calls :
Swarztrauber#536{Swarztrauber(sign, log, len, n1, blkp, plane, exp1)}
****************************************/
for(k = 0; k < n3; k++) {
/*************** Clava msgError **************
loop-step expression is not in canonical form: detected step operation is add_assign, expected one of assign, post_inc, pre_inc, pre_dec, post_dec, add, sub
****************************************/
for(bls = 0; bls < n2; bls += fftblock) {
ble = bls + fftblock - 1;
// NOTE(review): this clamp looks like it should be `ble >= n2` (last
// valid row index is n2-1); unreachable here because fftblock always
// divides n2 for the sizes used — confirm before reusing elsewhere.
if(ble > n2) ble = n2 - 1;
len = ble - bls + 1;
// Gather a strip of rows into `plane` (transposed for the FFT)...
#pragma omp parallel for default(shared) private(j, i) firstprivate(bls, ble, n1, blkp, k, x)
for(j = bls; j <= ble; j++) {
// #pragma omp parallel for default(shared) private(i) firstprivate(n1, j, bls, blkp, k, x)
for(i = 0; i < n1; i++) {
plane[j - bls + blkp * i] = x[k][j][i];
}
}
// ...transform it along n1, and scatter it back.
Swarztrauber(sign, log, len, n1, blkp, plane, exp1);
#pragma omp parallel for default(shared) private(j, i) firstprivate(bls, ble, n1, blkp, k, plane)
for(j = bls; j <= ble; j++) {
// #pragma omp parallel for default(shared) private(i) firstprivate(n1, j, bls, blkp, k, plane)
for(i = 0; i < n1; i++) {
x[k][j][i] = plane[j - bls + blkp * i];
}
}
}
}
// Second pass: FFT along n2. Rows are already contiguous with stride
// n1+1, so Swarztrauber can work directly on x without staging.
fftblock = 8192 / n2;
if(fftblock >= 32) fftblock = 32;
blkp = fftblock + 1;
log = ilog2(n2);
/*************** Clava msgError **************
Variables Access as passed arguments Can not be traced inside of function calls :
Swarztrauber#565{Swarztrauber(sign, log, len, n2, n1 + 1, &x[k][0][bls], exp2)}
****************************************/
for(k = 0; k < n3; k++) {
/*************** Clava msgError **************
loop-step expression is not in canonical form: detected step operation is add_assign, expected one of assign, post_inc, pre_inc, pre_dec, post_dec, add, sub
****************************************/
for(bls = 0; bls < n1; bls += fftblock) {
ble = bls + fftblock - 1;
if(ble > n1) ble = n1 - 1;
len = ble - bls + 1;
Swarztrauber(sign, log, len, n2, n1 + 1, &x[k][0][bls], exp2);
}
}
// Third pass: FFT along n3, writing the result into the flat xout array.
fftblock = 8192 / n3;
if(fftblock >= 32) fftblock = 32;
blkp = fftblock + 1;
log = ilog2(n3);
/*************** Clava msgError **************
Variables Access as passed arguments Can not be traced inside of function calls :
Swarztrauber#594{Swarztrauber(sign, log, len, n3, blkp, plane, exp3)}
****************************************/
for(k = 0; k < n2; k++) {
/*************** Clava msgError **************
loop-step expression is not in canonical form: detected step operation is add_assign, expected one of assign, post_inc, pre_inc, pre_dec, post_dec, add, sub
****************************************/
for(bls = 0; bls < n1; bls += fftblock) {
ble = bls + fftblock - 1;
if(ble > n1) ble = n1 - 1;
len = ble - bls + 1;
#pragma omp parallel for default(shared) private(i, j) firstprivate(n3, bls, ble, blkp, k, x)
for(i = 0; i < n3; i++) {
// #pragma omp parallel for default(shared) private(j) firstprivate(bls, ble, blkp, i, k, x)
for(j = bls; j <= ble; j++) {
plane[j - bls + blkp * i] = x[i][k][j];
}
}
Swarztrauber(sign, log, len, n3, blkp, plane, exp3);
#pragma omp parallel for default(shared) private(i, j) firstprivate(n3, bls, ble, n2, n1, k, blkp, plane)
for(i = 0; i <= n3 - 1; i++) {
// #pragma omp parallel for default(shared) private(j) firstprivate(bls, ble, n2, i, n1, k, blkp, plane)
for(j = bls; j <= ble; j++) {
xout[j + (n1 + 1) * (k + n2 * i)] = plane[j - bls + blkp * i];
}
}
}
}
}
// FT verification routine.
// Compares each per-iteration checksum in cksum[1..nt] against the
// published NPB reference values for the matching problem class; the
// run verifies when every relative error (computed via complex division
// against the reference) is within 1e-12. Unknown sizes skip the test.
void verify(int n1, int n2, int n3, int nt, dcomplex cksum[nt + 1], int *verified) {
// Local variables.
int kt;
// cexpd[1..nt] holds the reference checksums (index 0 unused).
dcomplex cexpd[26];
double epsilon, err;
// Initialize tolerance level and success flag.
epsilon = 1.0e-12;
*verified = 1;
if((n1 == 64) && (n2 == 64) && (n3 == 64) && (nt == 6)) {
// Class S reference values.
cexpd[1] = (dcomplex){554.6087004964, 484.5363331978};
cexpd[2] = (dcomplex){554.6385409189, 486.5304269511};
cexpd[3] = (dcomplex){554.6148406171, 488.3910722336};
cexpd[4] = (dcomplex){554.5423607415, 490.1273169046};
cexpd[5] = (dcomplex){554.4255039624, 491.7475857993};
cexpd[6] = (dcomplex){554.2683411902, 493.2597244941};
}
else if((n1 == 128) && (n2 == 128) && (n3 == 32) && (nt == 6)) {
// Class W reference values.
cexpd[1] = (dcomplex){567.3612178944, 529.3246849175};
cexpd[2] = (dcomplex){563.1436885271, 528.2149986629};
cexpd[3] = (dcomplex){559.4024089970, 527.0996558037};
cexpd[4] = (dcomplex){556.0698047020, 526.0027904925};
cexpd[5] = (dcomplex){553.0898991250, 524.9400845633};
cexpd[6] = (dcomplex){550.4159734538, 523.9212247086};
}
else if((n1 == 256) && (n2 == 256) && (n3 == 128) && (nt == 6)) {
// Class A reference values.
cexpd[1] = (dcomplex){504.6735008193, 511.4047905510};
cexpd[2] = (dcomplex){505.9412319734, 509.8809666433};
cexpd[3] = (dcomplex){506.9376896287, 509.8144042213};
cexpd[4] = (dcomplex){507.7892868474, 510.1336130759};
cexpd[5] = (dcomplex){508.5233095391, 510.4914655194};
cexpd[6] = (dcomplex){509.1487099959, 510.7917842803};
}
else if((n1 == 512) && (n2 == 256) && (n3 == 256) && (nt == 20)) {
// Class B reference values.
cexpd[1] = (dcomplex){517.7643571579, 507.7803458597};
cexpd[2] = (dcomplex){515.4521291263, 508.8249431599};
cexpd[3] = (dcomplex){514.6409228649, 509.6208912659};
cexpd[4] = (dcomplex){514.2378756213, 510.1023387619};
cexpd[5] = (dcomplex){513.9626667737, 510.3976610617};
cexpd[6] = (dcomplex){513.7423460082, 510.5948019802};
cexpd[7] = (dcomplex){513.5547056878, 510.7404165783};
cexpd[8] = (dcomplex){513.3910925466, 510.8576573661};
cexpd[9] = (dcomplex){513.2470705390, 510.9577278523};
cexpd[10] = (dcomplex){513.1197729984, 511.0460304483};
cexpd[11] = (dcomplex){513.0070319283, 511.1252433800};
cexpd[12] = (dcomplex){512.9070537032, 511.1968077718};
cexpd[13] = (dcomplex){512.8182883502, 511.2616233064};
cexpd[14] = (dcomplex){512.7393733383, 511.3203605551};
cexpd[15] = (dcomplex){512.6691062020, 511.3735928093};
cexpd[16] = (dcomplex){512.6064276004, 511.4218460548};
cexpd[17] = (dcomplex){512.5504076570, 511.4656139760};
cexpd[18] = (dcomplex){512.5002331720, 511.5053595966};
cexpd[19] = (dcomplex){512.4551951846, 511.5415130407};
cexpd[20] = (dcomplex){512.4146770029, 511.5744692211};
}
else if((n1 == 512) && (n2 == 512) && (n3 == 512) && (nt == 20)) {
// Class C reference values.
cexpd[1] = (dcomplex){519.5078707457, 514.9019699238};
cexpd[2] = (dcomplex){515.5422171134, 512.7578201997};
cexpd[3] = (dcomplex){514.4678022222, 512.2251847514};
cexpd[4] = (dcomplex){514.0150594328, 512.1090289018};
cexpd[5] = (dcomplex){513.7550426810, 512.1143685824};
cexpd[6] = (dcomplex){513.5811056728, 512.1496764568};
cexpd[7] = (dcomplex){513.4569343165, 512.1870921893};
cexpd[8] = (dcomplex){513.3651975661, 512.2193250322};
cexpd[9] = (dcomplex){513.2955192805, 512.2454735794};
cexpd[10] = (dcomplex){513.2410471738, 512.2663649603};
cexpd[11] = (dcomplex){513.1971141679, 512.2830879827};
cexpd[12] = (dcomplex){513.1605205716, 512.2965869718};
cexpd[13] = (dcomplex){513.1290734194, 512.3075927445};
cexpd[14] = (dcomplex){513.1012720314, 512.3166486553};
cexpd[15] = (dcomplex){513.0760908195, 512.3241541685};
cexpd[16] = (dcomplex){513.0528295923, 512.3304037599};
cexpd[17] = (dcomplex){513.0310107773, 512.3356167976};
cexpd[18] = (dcomplex){513.0103090133, 512.3399592211};
cexpd[19] = (dcomplex){512.9905029333, 512.3435588985};
cexpd[20] = (dcomplex){512.9714421109, 512.3465164008};
}
else if((n1 == 2048) && (n2 == 1024) && (n3 == 1024) && (nt == 25)) {
// Class D reference values.
cexpd[1] = (dcomplex){512.2230065252, 511.8534037109};
cexpd[2] = (dcomplex){512.0463975765, 511.7061181082};
cexpd[3] = (dcomplex){511.9865766760, 511.7096364601};
cexpd[4] = (dcomplex){511.9518799488, 511.7373863950};
cexpd[5] = (dcomplex){511.9269088223, 511.7680347632};
cexpd[6] = (dcomplex){511.9082416858, 511.7967875532};
cexpd[7] = (dcomplex){511.8943814638, 511.8225281841};
cexpd[8] = (dcomplex){511.8842385057, 511.8451629348};
cexpd[9] = (dcomplex){511.8769435632, 511.8649119387};
cexpd[10] = (dcomplex){511.8718203448, 511.8820803844};
cexpd[11] = (dcomplex){511.8683569061, 511.8969781011};
cexpd[12] = (dcomplex){511.8661708593, 511.9098918835};
cexpd[13] = (dcomplex){511.8649768950, 511.9210777066};
cexpd[14] = (dcomplex){511.8645605626, 511.9307604484};
cexpd[15] = (dcomplex){511.8647586618, 511.9391362671};
cexpd[16] = (dcomplex){511.8654451572, 511.9463757241};
cexpd[17] = (dcomplex){511.8665212451, 511.9526269238};
cexpd[18] = (dcomplex){511.8679083821, 511.9580184108};
cexpd[19] = (dcomplex){511.8695433664, 511.9626617538};
cexpd[20] = (dcomplex){511.8713748264, 511.9666538138};
cexpd[21] = (dcomplex){511.8733606701, 511.9700787219};
cexpd[22] = (dcomplex){511.8754661974, 511.9730095953};
cexpd[23] = (dcomplex){511.8776626738, 511.9755100241};
cexpd[24] = (dcomplex){511.8799262314, 511.9776353561};
cexpd[25] = (dcomplex){511.8822370068, 511.9794338060};
}
else if((n1 == 4096) && (n2 == 2048) && (n3 == 2048) && (nt == 25)) {
// Class E reference values.
cexpd[1] = (dcomplex){512.1601045346, 511.7395998266};
cexpd[2] = (dcomplex){512.0905403678, 511.8614716182};
cexpd[3] = (dcomplex){512.0623229306, 511.9074203747};
cexpd[4] = (dcomplex){512.0438418997, 511.9345900733};
cexpd[5] = (dcomplex){512.0311521872, 511.9551325550};
cexpd[6] = (dcomplex){512.0226088809, 511.9720179919};
cexpd[7] = (dcomplex){512.0169296534, 511.9861371665};
cexpd[8] = (dcomplex){512.0131225172, 511.9979364402};
cexpd[9] = (dcomplex){512.0104767108, 512.0077674092};
cexpd[10] = (dcomplex){512.0085127969, 512.0159443121};
cexpd[11] = (dcomplex){512.0069224127, 512.0227453670};
cexpd[12] = (dcomplex){512.0055158164, 512.0284096041};
cexpd[13] = (dcomplex){512.0041820159, 512.0331373793};
cexpd[14] = (dcomplex){512.0028605402, 512.0370938679};
cexpd[15] = (dcomplex){512.0015223011, 512.0404138831};
cexpd[16] = (dcomplex){512.0001570022, 512.0432068837};
cexpd[17] = (dcomplex){511.9987650555, 512.0455615860};
cexpd[18] = (dcomplex){511.9973525091, 512.0475499442};
cexpd[19] = (dcomplex){511.9959279472, 512.0492304629};
cexpd[20] = (dcomplex){511.9945006558, 512.0506508902};
cexpd[21] = (dcomplex){511.9930795911, 512.0518503782};
cexpd[22] = (dcomplex){511.9916728462, 512.0528612016};
cexpd[23] = (dcomplex){511.9902874185, 512.0537101195};
cexpd[24] = (dcomplex){511.9889291565, 512.0544194514};
cexpd[25] = (dcomplex){511.9876028049, 512.0550079284};
}
else {
printf(" Verification test for FT not performed\n");
*verified = 0;
}
// Verification test for results.
if(*verified) {
/*************** Clava msgError **************
Loop contains Invalid Statement -> BreakStmt#770
****************************************/
for(kt = 1; kt <= nt; kt++) {
// err = |(cksum[kt] - cexpd[kt]) / cexpd[kt]|, i.e. the magnitude of
// the element-wise relative error (dcmplx_div is evaluated 4 times
// with identical arguments; kept as-is from the generated code).
err = sqrt(((dcmplx_div((dcomplex){(cksum[kt]).real - (cexpd[kt]).real, (cksum[kt]).imag - (cexpd[kt]).imag}, cexpd[kt])).real * (dcmplx_div((dcomplex){(cksum[kt]).real - (cexpd[kt]).real, (cksum[kt]).imag - (cexpd[kt]).imag}, cexpd[kt])).real) + ((dcmplx_div((dcomplex){(cksum[kt]).real - (cexpd[kt]).real, (cksum[kt]).imag - (cexpd[kt]).imag}, cexpd[kt])).imag * (dcmplx_div((dcomplex){(cksum[kt]).real - (cexpd[kt]).real, (cksum[kt]).imag - (cexpd[kt]).imag}, cexpd[kt])).imag));
// The !(err <= epsilon) form also rejects NaN errors.
if(!(err <= epsilon)) {
*verified = 0;
break;
}
}
if(*verified) {
printf(" Verification test for FT successful\n");
}
else {
printf(" Verification test for FT failed\n");
}
}
}
// Print the standard NPB results banner: benchmark name, class, size,
// iteration count, elapsed time, Mop/s, operation type, and whether
// verification succeeded.
void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified) {
    printf("\n\n %s Benchmark Completed.\n", name);
    printf(" Class           =             %12c\n", class);
    // Non-grid benchmarks (EP, FT, CG) pass n2 = n3 = 0 and report only n1.
    // EP additionally encodes its size as 2^n1, printed without the
    // trailing decimal point that "%15.0lf" produces.
    if (n2 == 0 && n3 == 0) {
        if (name[0] == 'E' && name[1] == 'P') {
            char size[16];
            sprintf(size, "%15.0lf", pow(2.0, n1));
            int j = 14;
            if (size[j] == '.') {
                size[j] = ' ';
                j--;
            }
            size[j + 1] = '\0';
            printf(" Size            =          %15s\n", size);
        } else {
            printf(" Size            =             %12d\n", n1);
        }
    } else {
        printf(" Size            =           %4dx%4dx%4d\n", n1, n2, n3);
    }
    printf(" Iterations      =             %12d\n", niter);
    printf(" Time in seconds =             %12.2lf\n", t);
    printf(" Mop/s total     =          %15.2lf\n", mops);
    printf(" Operation type  = %24s\n", optype);
    printf(" Verification    =             %12s\n", verified ? "SUCCESSFUL" : "UNSUCCESSFUL");
}
//--------------------------------------------------------------------
// NPB linear congruential generator:
//
//     x_{k+1} = a * x_k (mod 2^46)
//
// Both a and *x must be odd integers in (1, 2^46) represented as
// doubles. The seed *x is updated in place; the return value is the
// seed normalized into (0, 1), i.e. 2^-46 * x_{k+1}. Requires >= 48
// mantissa bits of double precision (David H. Bailey, Oct 26, 1990).
//--------------------------------------------------------------------
double randlc(double *x, double a) {
    const double r23 = 1.1920928955078125e-07; // 2^-23
    const double r46 = r23 * r23;              // 2^-46
    const double t23 = 8.388608e+06;           // 2^23
    const double t46 = t23 * t23;              // 2^46

    // Split the multiplier: a = 2^23 * a_hi + a_lo.
    double a_hi = (int) (r23 * a);
    double a_lo = a - t23 * a_hi;

    // Split the seed: x = 2^23 * x_hi + x_lo.
    double x_hi = (int) (r23 * (*x));
    double x_lo = *x - t23 * x_hi;

    // z = a_hi*x_lo + a_lo*x_hi (mod 2^23);
    // new x = 2^23*z + a_lo*x_lo (mod 2^46).
    double cross = a_hi * x_lo + a_lo * x_hi;
    double cross_hi = (int) (r23 * cross);
    double z = cross - t23 * cross_hi;
    double full = t23 * z + a_lo * x_lo;
    double full_hi = (int) (r46 * full);
    *x = full - t46 * full_hi;

    return r46 * (*x);
}
void vranlc(int n, double *x, double a, double y[]) {
  //--------------------------------------------------------------------
  // Fill y[0..n-1] with uniform pseudorandom numbers in (0, 1) using the
  // linear congruential generator x_{k+1} = a * x_k (mod 2^46).
  //
  // *x is the seed (an odd integer-valued double in (1, 2^46)) and is
  // updated in place, so successive calls continue the same sequence.
  // All arithmetic uses explicit 23/46-bit splits in double precision,
  // giving identical results on any machine with >= 48 mantissa bits.
  // If n <= 0, nothing is written.
  //--------------------------------------------------------------------
  double const r23 = 1.1920928955078125e-07; // 2^-23
  double const r46 = r23 * r23;              // 2^-46
  double const t23 = 8.388608e+06;           // 2^23
  double const t46 = t23 * t23;              // 2^46
  //--------------------------------------------------------------------
  // Split the multiplier: a = 2^23 * a_hi + a_lo.
  //--------------------------------------------------------------------
  double tmp = r23 * a;
  double a_hi = (int) tmp;
  double a_lo = a - t23 * a_hi;
  //--------------------------------------------------------------------
  // Generate the n results.  Each iteration consumes the seed produced
  // by the previous one, so this loop is inherently sequential.
  //--------------------------------------------------------------------
  for (int k = 0; k < n; k++) {
    // Split the seed x = 2^23 * x_hi + x_lo, then compute
    //   z = a_hi*x_lo + a_lo*x_hi (mod 2^23)  and
    //   x = 2^23 * z + a_lo*x_lo  (mod 2^46).
    tmp = r23 * (*x);
    double x_hi = (int) tmp;
    double x_lo = *x - t23 * x_hi;
    tmp = a_hi * x_lo + a_lo * x_hi;
    double mid = (int) (r23 * tmp);
    double z = tmp - t23 * mid;
    double t3 = t23 * z + a_lo * x_lo;
    double carry = (int) (r46 * t3);
    *x = t3 - t46 * carry;
    y[k] = r46 * (*x); // normalize the new seed into (0, 1)
  }
}
void wtime(double *t) {
  // Wall-clock time in seconds, relative to the first call.  The first
  // invocation latches its tv_sec as the epoch so later readings stay
  // small and keep full microsecond resolution in a double.
  static int base_sec = -1;
  struct timeval now;
  gettimeofday(&now, (void *) 0);
  if (base_sec < 0) {
    base_sec = now.tv_sec;
  }
  *t = (now.tv_sec - base_sec) + 1.0e-6 * now.tv_usec;
}
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
double elapsed_time() {
  // Convenience wrapper: wall-clock seconds since the first wtime() call.
  double seconds;
  wtime(&seconds);
  return seconds;
}
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
void timer_clear(int n) {
// Reset the accumulated time of timer slot n to zero.
elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
void timer_start(int n) {
// Record the current wall-clock time as the start point of timer slot n.
start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
void timer_stop(int n) {
  // Add the wall-clock time since the matching timer_start(n) to the
  // running total for slot n.
  elapsed[n] += elapsed_time() - start[n];
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
double timer_read(int n) {
// Return the total accumulated seconds for timer slot n.
return (elapsed[n]);
}
|
coord.c | /*---------------------------------------------------------------------------------
COORD.C
-SET GRID POINTS AT CENTER, CORNER AND FACES
-EVALUATE BL R AND TH FROM KS
-COMPUTE TRANSFORMATION MATRIX FOR KS->MKS OR KS->FMKS
-COMPUTE METRIC COEFFICIENTS IN MKS/FMKS
-COMPUTE LIGHT-CROSSING TIME
-INITIALIZE FAILURE FLAGS TO ZERO
---------------------------------------------------------------------------------*/
/*
* -- given the indices i,j and location in the cell, return with
* the values of X1,X2 there;
* -- the locations are defined by :
* -----------------------
* | |
* | |
* |FACE1 CENT |
* | |
* |CORN FACE2 |
* ----------------------
*
*/
#include "decs.h"
double thG_of_X(const double X[NDIM]);
void thJ_of_X(const double X[NDIM], double *y, double* thJ);
double r_of_X(const double X[NDIM]);
double th_of_X(const double X[NDIM]);
// Set coordinate values at grid loc [i,j,LOC]
inline void coord(int i, int j, int loc, double *X)
{
  // Return the code coordinates X of zone (i, j) at the requested cell
  // location (corner, center, or one of the two faces).  Indices are
  // shifted by the ghost-zone count NG; centered locations sit half a
  // step inside the corner.
  // NOTE(review): only X[0] is zeroed here and X[3] is never written --
  // confirm callers do not read X[3] uninitialized.
  X[0] = 0;
  switch (loc)
  {
    case FACE1:
      X[1] = startx[1] + (i - NG) * dx[1];
      X[2] = startx[2] + (j + 0.5 - NG) * dx[2];
      break;
    case FACE2:
      X[1] = startx[1] + (i + 0.5 - NG) * dx[1];
      X[2] = startx[2] + (j - NG) * dx[2];
      break;
    case CENT:
      X[1] = startx[1] + (i + 0.5 - NG) * dx[1];
      X[2] = startx[2] + (j + 0.5 - NG) * dx[2];
      break;
    case CORN:
      X[1] = startx[1] + (i - NG) * dx[1];
      X[2] = startx[2] + (j - NG) * dx[2];
      break;
#if DEBUG
    default:
      fprintf(stderr, "Invalid coordinate location!\n");
      exit(-1);
#endif
  }
}
// Computes theta_G from X2
inline double thG_of_X(const double X[NDIM])
{
  // Standard MKS theta map: linear in X2 plus a sine term whose
  // amplitude is set by the global hslope (hslope = 1 gives uniform
  // theta spacing).
  double x2 = X[2];
  return M_PI*x2 + ((1. - hslope)/2.)*sin(2.*M_PI*x2);
}
// Computes theta_J from X2
inline void thJ_of_X(const double X[NDIM], double *y, double* thJ)
{
  // Polynomial theta map used by FMKS near the poles.  Outputs both the
  // remapped coordinate y in [-1, 1] and the resulting angle thJ; the
  // shape is controlled by the globals poly_norm, poly_xt, poly_alpha.
  double yy = 2*X[2] - 1.;
  *y = yy;
  *thJ = poly_norm*yy*(1. + pow(yy/poly_xt,poly_alpha)/(poly_alpha+1.)) +
         0.5*M_PI;
}
// Computes r from X1
inline double r_of_X(const double X[NDIM])
{
// Exponential radial map: X1 = log(r), concentrating zones at small r.
return exp(X[1]);
}
// Computes theta from (X1,X2)
inline double th_of_X(const double X[NDIM])
{
  // Theta at code coordinate X.  Plain MKS uses thG alone; with
  // DEREFINE_POLES the result is blended toward thJ, with the blend
  // decaying exponentially in radius away from startx[1].
  double th = thG_of_X(X);
#if DEREFINE_POLES
  double y, thJ;
  thJ_of_X(X, &y, &thJ);
  th = th + exp(mks_smooth*(startx[1] - X[1]))*(thJ - th);
#endif
  return th;
}
// Boyer-Lindquist coordinate of point X
inline void bl_coord(const double X[NDIM], double *r, double *th)
{
  // Convert code coordinates X to Boyer-Lindquist-style (r, theta).
  *r = r_of_X(X);
  *th = th_of_X(X);
#if COORDSINGFIX
  // Push theta off the polar axis (theta = 0 and theta = pi) so later
  // metric evaluations never divide by sin(theta) = 0.
  if (fabs(*th) < SINGSMALL)
    *th = (*th >= 0) ? SINGSMALL : -SINGSMALL;
  if (fabs(M_PI - (*th)) < SINGSMALL)
    *th = (*th >= M_PI) ? (M_PI + SINGSMALL) : (M_PI - SINGSMALL);
#endif
}
// Computes transformation matrix for KS->MKS and KS->FMKS
inline void set_dxdX(double X[NDIM], double dxdX[NDIM][NDIM])
{
// Fill dxdX with the Jacobian dx^mu/dX^nu of the map from code
// coordinates X to Kerr-Schild coordinates x, selected at compile time
// by METRIC and DEREFINE_POLES.
memset(dxdX, 0, NDIM*NDIM*sizeof(double));
#if METRIC == MINKOWSKI
// Cartesian Minkowski: code coordinates ARE the physical ones.
for (int mu = 0; mu < NDIM; mu++)
{
dxdX[mu][mu] = 1.;
}
#elif METRIC == MKS && !DEREFINE_POLES
// Plain MKS: r = exp(X1) and theta = thG(X2) depend on one coordinate
// each, so the Jacobian is diagonal.
dxdX[0][0] = 1.;
dxdX[1][1] = exp(X[1]);
dxdX[2][2] = M_PI - (hslope - 1.)*M_PI*cos(2.*M_PI*X[2]);
dxdX[3][3] = 1.;
#elif METRIC == MKS && DEREFINE_POLES
// FMKS: theta depends on both X1 and X2 (the poleward remap decays
// with radius), giving the off-diagonal d(theta)/dX1 entry [2][1].
dxdX[0][0] = 1.;
dxdX[1][1] = exp(X[1]);
dxdX[2][1] = -exp(mks_smooth*(startx[1]-X[1]))*mks_smooth*(
M_PI/2. -
M_PI*X[2] +
poly_norm*(2.*X[2]-1.)*(1+(pow((-1.+2*X[2])/poly_xt,poly_alpha))/(1 + poly_alpha)) -
1./2.*(1. - hslope)*sin(2.*M_PI*X[2])
);
dxdX[2][2] = M_PI + (1. - hslope)*M_PI*cos(2.*M_PI*X[2]) +
exp(mks_smooth*(startx[1]-X[1]))*(
-M_PI +
2.*poly_norm*(1. + pow((2.*X[2]-1.)/poly_xt,poly_alpha)/(poly_alpha+1.)) +
(2.*poly_alpha*poly_norm*(2.*X[2]-1.)*pow((2.*X[2]-1.)/poly_xt,poly_alpha-1.))/((1.+poly_alpha)*poly_xt) -
(1.-hslope)*M_PI*cos(2.*M_PI*X[2])
);
dxdX[3][3] = 1.;
#else
#error "Unsupported metric!"
#endif
}
// Computes covariant metric in KS
void gcov_func(double X[NDIM], double gcov[NDIM][NDIM])
{
// Covariant metric g_{mu nu} at code coordinate X.  For MKS/FMKS the
// Kerr-Schild metric is built in (r, theta) and then transformed to
// code coordinates using the Jacobian from set_dxdX().
memset(gcov, 0, NDIM*NDIM*sizeof(double));
#if METRIC == MINKOWSKI
// Flat spacetime: diag(-1, 1, 1, 1).
gcov[0][0] = -1.;
for (int j = 1; j < NDIM; j++) {
gcov[j][j] = 1.;
}
#else //Everything else is covered in set_dxdX
double sth, cth, s2, rho2;
double r, th;
bl_coord(X, &r, &th);
cth = cos(th);
sth = sin(th);
s2 = sth*sth;
// rho^2 = r^2 + a^2 cos^2(theta), with 'a' the global black-hole spin.
rho2 = r*r + a*a*cth*cth;
// Kerr-Schild metric components in (t, r, theta, phi).
gcov[0][0] = -1. + 2.*r/rho2;
gcov[0][1] = 2.*r/rho2;
gcov[0][3] = -2.*a*r*s2/rho2;
gcov[1][0] = gcov[0][1];
gcov[1][1] = 1. + 2.*r/rho2;
gcov[1][3] = -a*s2*(1. + 2.*r/rho2);
gcov[2][2] = rho2;
gcov[3][0] = gcov[0][3];
gcov[3][1] = gcov[1][3];
gcov[3][3] = s2*(rho2 + a*a*s2*(1. + 2.*r/rho2));
// Apply coordinate transformation to code coordinates X:
// g_{mu nu} = g^KS_{lam kap} (dx^lam/dX^mu)(dx^kap/dX^nu)
double dxdX[NDIM][NDIM];
set_dxdX(X, dxdX);
double gcov_ks[NDIM][NDIM];
memcpy(gcov_ks, gcov, NDIM*NDIM*sizeof(double));
memset(gcov, 0, NDIM*NDIM*sizeof(double));
for (int mu = 0; mu < NDIM; mu++) {
for (int nu = 0; nu < NDIM; nu++) {
for (int lam = 0; lam < NDIM; lam++) {
for (int kap = 0; kap < NDIM; kap++) {
gcov[mu][nu] += gcov_ks[lam][kap]*dxdX[lam][mu]*dxdX[kap][nu];
}
}
}
}
#endif // METRIC
}
// Establish X coordinates
void set_points()
{
// Set the grid origin (startx) and zone spacing (dx) according to the
// compile-time METRIC choice.
#if METRIC == MINKOWSKI
startx[1] = x1Min;
startx[2] = x2Min;
dx[1] = (x1Max - x1Min)/N1TOT;
dx[2] = (x2Max - x2Min)/N2TOT;
#elif METRIC == MKS
// Set Rin such that we have 5 zones completely inside the event horizon
// If xeh = log(Rhor), xin = log(Rin), and xout = log(Rout),
// then we want xeh = xin + 5.5 * (xout - xin) / N1TOT, or solving/replacing:
Rin = exp((N1TOT * log(Rhor) / 5.5 - log(Rout)) / (-1. + N1TOT / 5.5));
startx[1] = log(Rin);
// Rin < 1 would mean the grid starts inside r = 1; treated as a setup error.
if (startx[1] < 0.0) ERROR("Not enough radial zones! Increase N1!");
startx[2] = 0.;
dx[1] = log(Rout/Rin)/N1TOT;
dx[2] = 1./N2TOT;
#if DEREFINE_POLES
// Normalization of the FMKS polynomial theta map (see thJ_of_X).
poly_norm = 0.5*M_PI*1./(1. + 1./(poly_alpha + 1.)*
1./pow(poly_xt, poly_alpha));
#endif
#endif // METRIC
}
// Sets the grid struct G
void set_grid(struct GridGeom *G)
{
// Set up boundaries, steps in coordinate grid
set_points();
dV = dx[1]*dx[2];
// Fill the geometry cache (metric, inverse, determinant, lapse) at
// every location of every zone, including NG ghost zones on each side.
#if !INTEL_WORKAROUND
#pragma omp parallel for collapse(2)
#endif
JSLOOP(-NG, N2 - 1 + NG) {
ISLOOP(-NG, N1 - 1 + NG) {
set_grid_loc(G, i, j, CENT);
set_grid_loc(G, i, j, CORN);
set_grid_loc(G, i, j, FACE1);
set_grid_loc(G, i, j, FACE2);
// Connection only needed at zone center
conn_func(G, i, j);
}
}
}
// Makes necessary function calls to set grid at various LOC
inline void set_grid_loc(struct GridGeom *G, int i, int j, int loc)
{
// Compute and cache the geometry (gcov, gcon, gdet, lapse) for one
// grid location (i, j, loc).
double X[NDIM];
double gcov[NDIM][NDIM], gcon[NDIM][NDIM];
coord(i, j, loc, X);
gcov_func(X, gcov);
// gcon_func inverts gcov into gcon and returns the value stored as
// gdet -- NOTE(review): presumably sqrt(-det g); confirm in gcon_func.
G->gdet[loc][j][i] = gcon_func(gcov, gcon);
for (int mu = 0; mu < NDIM; mu++) {
for (int nu = 0; nu < NDIM; nu++) {
G->gcov[loc][mu][nu][j][i] = gcov[mu][nu];
G->gcon[loc][mu][nu][j][i] = gcon[mu][nu];
}
}
// Lapse alpha = 1/sqrt(-g^{00}).
G->lapse[loc][j][i] = 1./sqrt(-G->gcon[loc][0][0][j][i]);
}
// Initializes flags and fails to zero
void zero_arrays()
{
// Reset per-zone failure bookkeeping.  ZLOOPALL iterates i, j over the
// whole grid -- assumed to include ghost zones; confirm against the
// macro definition in decs.h.
ZLOOPALL
{
pflag[j][i] = 0;
fail_save[j][i] = 0;
}
}
|
sequence2batch.h | #ifndef ANAKIN_SABER_FUNC_IMPL_X86_MATH_SEQUENCE_BATCH_H
#define ANAKIN_SABER_FUNC_IMPL_X86_MATH_SEQUENCE_BATCH_H
#include <algorithm>
#include <vector>
#include "saber/core/tensor.h"
#include "saber/funcs/impl/x86/x86_utils.h"
#include "saber/funcs/impl/x86/anakin_thread.h"
namespace anakin {
namespace saber {
namespace math {
template <DataType Dtype, typename LayOutType>
class CopyMatrixRowsFunctor {
public:
typedef Tensor<X86> ioTensor;
typedef typename DataTrait<X86, Dtype>::Dtype dtype;
// If is_src_index is true,
// copy the indexed rows of input src to the output dst.
// If is_src_index is false,
// copy the input src to the indexed rows of output dst.
// The indexed rows are based on the input index.
// Declaration only; the implementation lives in the corresponding
// translation unit.  NOTE(review): fragment_num / offset / width
// semantics are inferred from call sites below -- confirm in the impl.
void operator()(ioTensor* src,
std::vector<int> index_lod, ioTensor* dst,
bool is_src_index, int fragment_num, int offset = 0, int width = 0);
};
template <DataType Dtype, typename LayOutType>
class Seq2BatchFunctor {
// Calculate the length of each sequence and
// sort sequence index by the length.
// example: sequences = {s0, s1, s2}
// s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
// seq_info[3] = {(4, 5, 1), (0, 4, 0), (9, 3, 2)}
//
// (start, length, original index) triple for one input sequence.
struct SeqInfo {
SeqInfo(int start, int length, int seq_idx)
: start(start), length(length), seq_idx(seq_idx) {}
int start;
int length;
int seq_idx;
};
public:
typedef Tensor<X86> ioTensor;
// Rearrange the rows of `seq` into time-major batch order in `batch`.
// When is_cal_batch_lod is false, seq_to_batch_meta must already hold a
// previously computed mapping (level 1 = row index table); otherwise it
// must hold exactly one level of sequence offsets, and the full 3-level
// batch mapping is computed and written back into seq_to_batch_meta.
void operator()(ioTensor* seq,
ioTensor* batch, std::vector<std::vector<int>>& seq_to_batch_meta, bool is_cal_batch_lod,
bool is_reverse = false, int fragment_num = 1) const {
if (!is_cal_batch_lod) {
// Reuse an existing mapping: just scatter rows by the index table.
if (seq_to_batch_meta.size() < 2) {
// NOTE(review): message typo "inlcude" left as-is (runtime string).
LOG(ERROR) << "The size of seq_to_batch_meta should inlcude at least 2-level sequence information.";
exit(-1);
}
if (seq_to_batch_meta[1].size() != static_cast<int>(seq->num())) {
LOG(ERROR) << "The seq_to_batch information should be consistent with the dims.";
exit(-1);
}
CopyMatrixRowsFunctor<Dtype, LayOutType> to_batch;
to_batch(seq, seq_to_batch_meta[1], batch, true, fragment_num);
return;
}
if (seq_to_batch_meta.size() != 1) {
LOG(ERROR) << "Only support one level sequence now.";
exit(-1);
}
auto seq_meta = seq_to_batch_meta[0];
// Collect (start, length, index) per sequence, then sort by length
// descending so the longest sequence defines the batch count.
std::vector<SeqInfo> seq_info;
for (int seq_id = 0; seq_id < seq_meta.size() - 1; ++seq_id) {
int length = seq_meta[seq_id + 1] - seq_meta[seq_id];
seq_info.emplace_back(seq_meta[seq_id], length, seq_id);
//LOG(INFO) << "seq_meta[seq_id]:" << seq_meta[seq_id] << " length:" << length << " seq_id:" <<seq_id;
}
std::sort(seq_info.begin(), seq_info.end(),
[](SeqInfo a, SeqInfo b) {
return a.length > b.length;
});
// Calculate the start position of each batch.
// example: sequences = {s0, s1, s2}
// s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
// num_batch = 5,
// batchIndex = {b0, b1, b2, b3, b4}
// b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1
// batch_start_positions[6] = {0, 3, 6, 9, 11, 12}
// batch_start_positions[0] = len(b0)
// batch_start_positions[1] = len(b0) + len(b1)
// batch_start_positions[2] = len(b0) + len(b1) + len(b2)
// ...
// seq2batch_idx[12] = {4, 0, 9,
// 5, 1, 10,
// 6, 2, 11,
// 7, 3,
// 8}
// seq_order = {1, 0, 2}, the sort order.
// where 1 is the second sequence,
// 0 is the first sequence,
// 2 is the third sequence.
// The num_batch represents batch size after rearranging the
// input LodTensor. It is also the maximum length of input sequence.
std::vector<std::vector<int>> batch_seq_meta;
batch_seq_meta.emplace_back(std::vector<int> {0});
batch_seq_meta.emplace_back(std::vector<int> {0});
batch_seq_meta.emplace_back(std::vector<int> {0});
// batch_seq_meta[0] is the start positions for batch LoDTensor
int num_batch = seq_info[0].length;
batch_seq_meta[0].resize(static_cast<int>(num_batch + 1));
// batch_seq_meta[1] is the raw index in the input LoDTensor
batch_seq_meta[1].resize(static_cast<int>(seq->num()));
// batch_seq_meta[2] is the sort order for the input LoDTensor.
batch_seq_meta[2].resize(seq_info.size());
int* batch_starts = batch_seq_meta[0].data();
int* seq2batch_idx = batch_seq_meta[1].data();
batch_starts[0] = 0;
// For each time step n, append one row index from every sequence that
// is still long enough; is_reverse walks each sequence back-to-front.
for (int n = 0; n < num_batch; n++) {
auto batch_id = static_cast<int>(batch_starts[n]);
for (int i = 0; i < seq_info.size(); ++i) {
int seq_len = seq_info[i].length;
int start = seq_info[i].start;
if (n < seq_len) {
seq2batch_idx[batch_id] =
is_reverse ? start + seq_len - 1 - n : start + n;
batch_id++;
} else {
// seq_info is sorted by length, so all later sequences are shorter.
break;
}
}
batch_starts[n + 1] = static_cast<int>(batch_id);
}
int* seq_order = batch_seq_meta[2].data();
for (int i = 0; i < seq_info.size(); ++i) {
seq_order[i] = seq_info[i].seq_idx;
}
// Publish the computed mapping back to the caller, then do the copy.
seq_to_batch_meta = batch_seq_meta;
CopyMatrixRowsFunctor<Dtype, LayOutType> to_batch;
to_batch(seq, batch_seq_meta[1], batch, true, fragment_num);
}
};
template <DataType Dtype, typename LayOutType>
class Batch2SeqFunctor {
public:
    typedef Tensor<X86> ioTensor;
    // Scatter the rows of `batch` back into sequence order, writing `seq`.
    // seq_to_batch_meta must hold the mapping previously built by
    // Seq2BatchFunctor (level 1 = batch-row -> sequence-row index table).
    // `offset`/`width` optionally select a column window of each row.
    void operator()(ioTensor* batch,
                    ioTensor* seq, std::vector<std::vector<int>>& seq_to_batch_meta,
                    int fragment_num = 1,
                    int offset = 0,
                    int width = 0) const {
        if (seq_to_batch_meta.size() < 2) {
            // Fixed typo in the message ("inlcude" -> "include").
            LOG(ERROR) << "The size of seq_to_batch_meta should include at least 2-level sequence information.";
            exit(-1);
        }
        // Compare as size_t: vector::size() is unsigned, so narrowing
        // num() comparison to int invited a signed/unsigned mismatch.
        if (seq_to_batch_meta[1].size() != static_cast<size_t>(seq->num())) {
            LOG(ERROR) << "The seq_to_batch information should be consistent with the dims.";
            exit(-1);
        }
        CopyMatrixRowsFunctor<Dtype, LayOutType> to_seq;
        // is_src_index == false: write src rows into indexed rows of dst.
        to_seq(batch, seq_to_batch_meta[1], seq, false, fragment_num, offset, width);
    }
};
template <DataType Dtype, typename LayOutType>
class ReorderInitState {
public:
    typedef Tensor<X86> ioTensor;
    // Shuffle the rows of `src` into `dst` according to `ind_lod`
    // (thin convenience wrapper over CopyMatrixRowsFunctor).
    void operator()(ioTensor* src, std::vector<int> ind_lod, ioTensor* dst, bool indexed_src,
                    int fragment_num = 1) {
        math::CopyMatrixRowsFunctor<Dtype, LayOutType> shuffle_rows;
        shuffle_rows(src, ind_lod, dst, indexed_src, fragment_num);
    }
};
/*
* This class can used to modify the matrix structure of sequence matrix into
* batch structure.
* sequence matrix: [C1_s ... Cn_s | ...... | C1_t ... Cn_t]
* batch matrix: [C1_s ... C1_t | ...... | Cn_s ... Cn_t]
* Cn_s is the state for sequence s at time n.
*
* Example: sequence matrix = {{0, 0, 0, 0}, {1, 1, 1, 1, 1}, {2, 2, 2}}
* s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
* batch matrix = {{1, 0, 2}, {1, 0, 2}, {1, 0, 2}, {1, 0}, {1}}
* b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1
*
* Use:
* Input: seqMatrix, seqStarts(Sequence Start Positions)
* Output: batchMatrix
* 1. SequenceToBatch seq2batch;
* 2. seq2batch.resizeOrCreateBatch(seqStarts); // calculate seq2BatchIdx
* 3. seq2batch.copy(seqMatrix, batchMatrix, true); // copy seq to batch matrix
*
*/
class SequenceToBatch {
public:
SequenceToBatch() {};
// Gather: output[row] = input[seq2BatchIdx_[row]], row-by-row, with
// word_size elements per row.  Requires create_batch() to have run.
template <typename Dtype>
void seq_2_bat(const Dtype* input, Dtype* output, int word_size) {
int word_sum = seq2BatchIdx_.size();
#pragma omp parallel for if(thread_num > 1)
for (int old_id = 0; old_id < word_sum; ++old_id) {
int word_start = old_id * word_size;
int maped_id = seq2BatchIdx_[old_id];
int maped_start = maped_id * word_size;
for (int word_offset = 0; word_offset < word_size; ++word_offset) {
output[word_start + word_offset] = input[maped_start + word_offset];
}
}
}
// Gather per-sequence hidden state rows into length-sorted order using
// the original sequence indices recorded in seqStartAndLength_.
template <typename Dtype>
void hidden_2_bat(const Dtype* input, Dtype* output, int hidden_size) {
int batch_size = seqStartAndLength_.size();
for (int old_id = 0; old_id < batch_size; ++old_id) {
int word_start = old_id * hidden_size;
int maped_id = seqStartAndLength_[old_id].seqIdx_;
int maped_start = maped_id * hidden_size;
for (int word_offset = 0; word_offset < hidden_size; ++word_offset) {
output[word_start + word_offset] = input[maped_start + word_offset];
}
}
}
// Scatter (inverse of seq_2_bat): output[seq2BatchIdx_[row]] = input[row].
template <typename Dtype>
void bat_2_seq(const Dtype* input, Dtype* output, int hidden_size) {
int word_sum = seq2BatchIdx_.size();
#pragma omp parallel for if(thread_num > 1)
for (int old_id = 0; old_id < word_sum; old_id++) {
int word_start = old_id * hidden_size;
int maped_id = seq2BatchIdx_[old_id];
int maped_start = maped_id * hidden_size;
for (int word_offset = 0; word_offset < hidden_size; word_offset++) {
output[maped_start + word_offset] = input[word_start + word_offset];
}
}
}
// Scatter variant where input rows are padded to aligned_hidden_size
// but only hidden_size elements per row are copied out.
template <typename Dtype>
void bat_2_seq(const Dtype* input, Dtype* output, int hidden_size, int aligned_hidden_size) {
int word_sum = seq2BatchIdx_.size();
#pragma omp parallel for if(thread_num > 1)
for (int old_id = 0; old_id < word_sum; old_id++) {
int word_start = old_id * aligned_hidden_size;
int maped_id = seq2BatchIdx_[old_id];
int maped_start = maped_id * hidden_size;
for (int word_offset = 0; word_offset < hidden_size; word_offset++) {
output[maped_start + word_offset] = input[word_start + word_offset];
}
}
}
// Copy batch start offsets into bat_offset.  NOTE(review): assumes the
// caller sized bat_offset to at least numBatch_ + 1 -- confirm callers.
void get_batch_offset(std::vector<int>& bat_offset) {
for (size_t i = 0; i < batchStartPositions_.size(); i++) {
bat_offset[i] = batchStartPositions_[i];
}
}
// Number of time-step batches (= length of the longest sequence).
size_t get_batch_num() const {
return numBatch_;
}
// Build seq2BatchIdx_ / batchStartPositions_ from per-sequence start
// offsets.  `reversed` walks each sequence back-to-front, as needed for
// a backward RNN pass.
void create_batch(int batchSize, size_t numSequences, std::vector<int>& seqStarts,
bool reversed) {
CHECK_EQ(seqStarts[numSequences], batchSize);
seq2BatchIdx_.resize(batchSize);
/*
* calculate the length of each sequence & sort sequence index by the length
* Example: Sequences = {s0, s1, s2}
* s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
* seqStartAndLength_[3] = {(4, 5, 1), (0, 4, 0), (9, 3, 2)}
*/
for (size_t seqId = 0; seqId < numSequences; ++seqId) {
int length = seqStarts[seqId + 1] - seqStarts[seqId];
seqStartAndLength_.emplace_back(seqStarts[seqId], length, seqId);
}
// Sort longest-first so batch n contains every sequence of length > n.
std::sort(seqStartAndLength_.begin(), seqStartAndLength_.end(),
[](SeqStartAndLength a, SeqStartAndLength b) {
return a.length_ > b.length_;
});
/*
* calculate the start position of each batch
* (numBatch equal the maxLength of sequences)
* Example: Sequences = {s0, s1, s2}
* s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
* numBatch = 5,
* batchIndex = {b0, b1, b2, b3, b4}
* b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1
* batchStartPositions[6] = {0, 3, 6, 9, 11, 12}
*/
numBatch_ = (size_t)seqStartAndLength_[0].length_;
batchStartPositions_.resize(numBatch_ + 1);
batchStartPositions_[0] = 0;
for (size_t n = 0; n < numBatch_; n++) {
int batchId = batchStartPositions_[n];
for (size_t i = 0; i < seqStartAndLength_.size(); ++i) {
size_t seqLength = seqStartAndLength_[i].length_;
int start = seqStartAndLength_[i].start_;
if (n < seqLength) {
if (!reversed) {
seq2BatchIdx_[batchId] = start + n;
} else {
seq2BatchIdx_[batchId] = start + seqLength - 1 - n;
}
batchId++;
} else {
// Sorted by length: every remaining sequence is also too short.
break;
}
}
batchStartPositions_[n + 1] = batchId;
}
}
protected:
// (start offset, length, original sequence index) for one sequence.
struct SeqStartAndLength {
int start_;
int length_;
int seqIdx_;
SeqStartAndLength(int start, int length, int seqIdx)
: start_(start), length_(length), seqIdx_(seqIdx) {}
};
std::vector<SeqStartAndLength> seqStartAndLength_;
std::vector<int> batchStartPositions_;
std::vector<int> seq2BatchIdx_;
size_t numBatch_{0};
// Thread count used to gate the OpenMP loops above.
int thread_num = anakin_get_max_threads();
};
} // namespace math
} // namespace saber
} // namespace anakin
#endif
|
csr_block_matvec.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRBlockMatrix class.
*
*****************************************************************************/
#include "csr_block_matrix.h"
#include "../seq_mv/seq_mv.h"
#include <assert.h>
/*--------------------------------------------------------------------------
* hypre_CSRBlockMatrixMatvec
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRBlockMatrixMatvec(HYPRE_Complex alpha, hypre_CSRBlockMatrix *A,
hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *y)
{
/* Computes y <- alpha*A*x + beta*y for a block-CSR matrix: each stored
* entry is a dense blk_size x blk_size block laid out row-major in
* A_data (bnnz values per entry).  Implemented as y = (beta/alpha)*y,
* y += A*x, y *= alpha to keep the inner loop scale-free. */
HYPRE_Complex *A_data = hypre_CSRBlockMatrixData(A);
HYPRE_Int *A_i = hypre_CSRBlockMatrixI(A);
HYPRE_Int *A_j = hypre_CSRBlockMatrixJ(A);
HYPRE_Int num_rows = hypre_CSRBlockMatrixNumRows(A);
HYPRE_Int num_cols = hypre_CSRBlockMatrixNumCols(A);
HYPRE_Int blk_size = hypre_CSRBlockMatrixBlockSize(A);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int x_size = hypre_VectorSize(x);
HYPRE_Int y_size = hypre_VectorSize(y);
HYPRE_Int i, b1, b2, jj, bnnz=blk_size*blk_size;
HYPRE_Int ierr = 0;
HYPRE_Complex temp;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
if (num_cols*blk_size != x_size) ierr = 1;
if (num_rows*blk_size != y_size) ierr = 2;
if (num_cols*blk_size != x_size && num_rows*blk_size != y_size) ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*blk_size; i++) y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*blk_size; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*blk_size; i++)
y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A*x
* Parallel over rows is safe here: iteration i writes only the
* blk_size entries of y belonging to row i, and temp is private.
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj,b1,b2,temp) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
for (b1 = 0; b1 < blk_size; b1++)
{
temp = y_data[i*blk_size+b1];
for (b2 = 0; b2 < blk_size; b2++)
temp += A_data[jj*bnnz+b1*blk_size+b2] * x_data[A_j[jj]*blk_size+b2];
y_data[i*blk_size+b1] = temp;
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*blk_size; i++)
y_data[i] *= alpha;
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRBlockMatrixMatvecT
*
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRBlockMatrixMatvecT( HYPRE_Complex alpha,
                             hypre_CSRBlockMatrix *A,
                             hypre_Vector *x,
                             HYPRE_Complex beta,
                             hypre_Vector *y )
{
   /* Computes y <- alpha*A^T*x + beta*y for a block-CSR matrix (each
    * stored entry is a dense blk_size x blk_size block).  Implemented as
    * y = (beta/alpha)*y, y += A^T*x, y *= alpha. */
   HYPRE_Complex *A_data = hypre_CSRBlockMatrixData(A);
   HYPRE_Int     *A_i    = hypre_CSRBlockMatrixI(A);
   HYPRE_Int     *A_j    = hypre_CSRBlockMatrixJ(A);
   HYPRE_Int      num_rows = hypre_CSRBlockMatrixNumRows(A);
   HYPRE_Int      num_cols = hypre_CSRBlockMatrixNumCols(A);
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      x_size = hypre_VectorSize(x);
   HYPRE_Int      y_size = hypre_VectorSize(y);
   HYPRE_Complex  temp;
   HYPRE_Int      i, j, jj;
   HYPRE_Int      ierr = 0;
   HYPRE_Int      b1, b2;
   HYPRE_Int      blk_size = hypre_CSRBlockMatrixBlockSize(A);
   HYPRE_Int      bnnz = blk_size*blk_size;
   /*---------------------------------------------------------------------
    * Check for size compatibility. MatvecT returns ierr = 1 if
    * length of X doesn't equal the number of rows of A,
    * ierr = 2 if the length of Y doesn't equal the number of
    * columns of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in MatvecT, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/
   if (num_rows*blk_size != x_size)
      ierr = 1;
   if (num_cols*blk_size != y_size)
      ierr = 2;
   if (num_rows*blk_size != x_size && num_cols*blk_size != y_size)
      ierr = 3;
   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols*blk_size; i++)
         y_data[i] *= beta;
      return ierr;
   }
   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y
    *-----------------------------------------------------------------------*/
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_cols*blk_size; i++)
            y_data[i] = 0.0;
      }
      else
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_cols*blk_size; i++)
            y_data[i] *= temp;
      }
   }
   /*-----------------------------------------------------------------
    * y += A^T*x
    *
    * NOTE: this scatter loop is deliberately NOT parallelized over
    * rows.  Distinct rows i can hold entries in the same column
    * j = A_j[jj], so concurrent updates of y_data[j*blk_size+b2] would
    * be a data race; the previous "#pragma omp parallel for" here gave
    * nondeterministic results when OpenMP was enabled.
    *-----------------------------------------------------------------*/
   for (i = 0; i < num_rows; i++)
   {
      for (jj = A_i[i]; jj < A_i[i+1]; jj++) /* each nonzero in row i */
      {
         j = A_j[jj]; /* block-column of this entry (hoisted) */
         for (b1 = 0; b1 < blk_size; b1++) /* row within block */
         {
            for (b2 = 0; b2 < blk_size; b2++) /* column within block */
            {
               y_data[j*blk_size+b2] +=
                  A_data[jj*bnnz+b1*blk_size+b2] * x_data[i*blk_size + b1];
            }
         }
      }
   }
   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols*blk_size; i++)
         y_data[i] *= alpha;
   }
   return ierr;
}
|
colormap.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR M M AAA PPPP %
% C O O L O O R R MM MM A A P P %
% C O O L O O RRRR M M M AAAAA PPPP %
% C O O L O O R R M M A A P %
% CCCC OOO LLLLL OOO R R M M A A P %
% %
% %
% MagickCore Colormap Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% We use linked-lists because splay-trees do not currently support duplicate
% key / value pairs (.e.g X11 green compliance and SVG green compliance).
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/client.h"
#include "magick/configure.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/semaphore.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AcquireImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% The format of the AcquireImageColormap method is:
%
% MagickBooleanType AcquireImageColormap(Image *image,const size_t colors)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
*/
static inline size_t MagickMax(const size_t x,
  const size_t y)
{
  /* Return the larger of two size_t values. */
  return(x < y ? y : x);
}
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
  const size_t colors)
{
  register ssize_t
    i;

  /*
    Allocate image colormap.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Clamp to at least 2 entries so the gray-ramp divisor (colors-1) below is
    never zero.
  */
  image->colors=MagickMax(colors,2);
  if (image->colormap == (PixelPacket *) NULL)
    image->colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
      sizeof(*image->colormap));
  else
    image->colormap=(PixelPacket *) ResizeQuantumMemory(image->colormap,
      image->colors,sizeof(*image->colormap));
  if (image->colormap == (PixelPacket *) NULL)
    {
      /*
        Allocation failed: drop back to DirectClass with no colormap.
        ThrowBinaryException() records the error and returns from this
        function (presumably with MagickFalse, per MagickCore convention).
      */
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Initialize the colormap to a linear gray ramp from 0 to QuantumRange
    (integer arithmetic), fully opaque.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    size_t
      pixel;

    pixel=(size_t) (i*(QuantumRange/(image->colors-1)));
    image->colormap[i].red=(Quantum) pixel;
    image->colormap[i].green=(Quantum) pixel;
    image->colormap[i].blue=(Quantum) pixel;
    image->colormap[i].opacity=OpaqueOpacity;
  }
  return(SetImageStorageClass(image,PseudoClass));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C y c l e C o l o r m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CycleColormap() displaces an image's colormap by a given number of
% positions. If you cycle the colormap a number of times you can produce
%  a psychedelic effect.
%
% The format of the CycleColormapImage method is:
%
% MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o displace: displace the colormap this amount.
%
*/
MagickExport MagickBooleanType CycleColormapImage(Image *image,
  const ssize_t displace)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Rotating a colormap requires one; convert DirectClass images to a
    palette first.
  */
  if (image->storage_class == DirectClass)
    (void) SetImageType(image,PaletteType);
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    ssize_t
      index;

    /*
      A prior row failed; skip remaining rows (cannot break out of an
      OpenMP loop).
    */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Shift each pixel's colormap index by displace, wrapping modulo
        image->colors; the extra add handles C's negative remainder.
      */
      index=(ssize_t) (GetPixelIndex(indexes+x)+displace) %
        image->colors;
      if (index < 0)
        index+=(ssize_t) image->colors;
      SetPixelIndex(indexes+x,index);
      SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S o r t C o l o r m a p B y I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortColormapByIntensity() sorts the colormap of a PseudoClass image by
% decreasing color intensity.
%
% The format of the SortColormapByIntensity method is:
%
% MagickBooleanType SortColormapByIntensity(Image *image)
%
% A description of each parameter follows:
%
% o image: A pointer to an Image structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator over PixelPacket entries: orders by decreasing
  intensity (brighter colors sort first).
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelPacket
    *p,
    *q;

  p=(const PixelPacket *) x;
  q=(const PixelPacket *) y;
  /* Same operand/conversion order as before: only the second intensity is
     cast to int before the subtraction; the difference is then narrowed
     to int. */
  return((int) (PixelPacketIntensity(q)-(int) PixelPacketIntensity(p)));
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
MagickExport MagickBooleanType SortColormapByIntensity(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  unsigned short
    *pixels;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->storage_class != PseudoClass)
    return(MagickTrue);
  /*
    Allocate memory for pixel indexes (old index -> new index mapping).
  */
  pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors,
    sizeof(*pixels));
  if (pixels == (unsigned short *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Stash each entry's current index in its (otherwise unused here) opacity
    channel so the position survives the sort.  NOTE(review): the opacity
    channel is left holding this value afterwards -- confirm callers do not
    rely on colormap opacity.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* status is not referenced in this loop; the shared clause is vestigial. */
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].opacity=(IndexPacket) i;
  /*
    Sort image colormap by decreasing color intensity (IntensityCompare).
  */
  qsort((void *) image->colormap,(size_t) image->colors,
    sizeof(*image->colormap),IntensityCompare);
  /*
    Build the old-index -> new-index lookup table from the stashed values.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
    pixels[(ssize_t) image->colormap[i].opacity]=(unsigned short) i;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    Remap every pixel's index to the sorted colormap order and refresh its
    RGBO values.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      index;

    register ssize_t
      x;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(IndexPacket) pixels[(ssize_t) GetPixelIndex(indexes+x)];
      SetPixelIndex(indexes+x,index);
      SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (status == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  pixels=(unsigned short *) RelinquishMagickMemory(pixels);
  return(status);
}
|
hmacSHA512_fmt_plug.c | /*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* Based on hmac-md5 by Bartavelle
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_hmacSHA512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_hmacSHA512);
#else
#include "sha2.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "memdbg.h"
#define FORMAT_LABEL "HMAC-SHA512"
#define FORMAT_NAME ""
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "password is key, SHA512 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "password is key, SHA512 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define PAD_SIZE 128
#define BINARY_SIZE (512/8)
#define BINARY_ALIGN 4
#define SALT_SIZE PAD_SIZE
#define SALT_ALIGN 1
#define CIPHERTEXT_LENGTH (SALT_SIZE + 1 + BINARY_SIZE * 2)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors: "<salt>#<hex HMAC-SHA512 digest>", password.  The salt
   may itself contain '#'; parsers split on the LAST '#'. */
static struct fmt_tests tests[] = {
	{"what do ya want for nothing?#164b7a7bfcf819e2e395fbe73b56e0a387bd64222e831fd610270cd7ea2505549758bf75c05a994a6d034f65f8f0e6fdcaeab1a34d4a6b4b636e070a38bce737", "Jefe"},
	{"Reference hashes are keys to success#73a5eff716d0147a440fdf5aff187c52deab8c4dc55073be3d5742e788a99fd6b53a5894725f0f88f3486b5bb63d2af930a0cf6267af572128273daf8eee4cfa", "The magnum"},
	{"Beppe#Grillo#AB08C46822313481D548412A084F08C7CA3BBF8A98D901D14698759F4C36ADB07528348D56CAF4F6AF654E14FC102FF10DCF50794A82544426386C7BE238CEAF", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."},
	{NULL}
};

/* Per-candidate working state, allocated in init(): */
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];  /* plaintext (rebuilt from ipad in get_key()) */
static ARCH_WORD (*crypt_key)[BINARY_SIZE / sizeof(ARCH_WORD) + 1];  /* computed digests */
static unsigned char (*opad)[PAD_SIZE];  /* key XOR 0x5c */
static unsigned char (*ipad)[PAD_SIZE];  /* key XOR 0x36 */
static unsigned char cursalt[SALT_SIZE];  /* current salt, NUL-terminated text */
/*
 * Format init hook: scale the key batch for OpenMP and allocate the
 * per-candidate buffers.  Memory comes from mem_calloc_tiny() pools and is
 * never explicitly freed (fmt_default_done).
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_plain = mem_calloc_tiny(sizeof(*saved_plain) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	opad = mem_calloc_tiny(sizeof(*opad) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	/* was sizeof(*opad): same size today, but size the buffer by its own
	   element type so the two cannot silently diverge */
	ipad = mem_calloc_tiny(sizeof(*ipad) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/*
 * Accept only well-formed "salt#hexdigest" lines:
 *  - a '#' separator must be present (the LAST one is used, so the salt may
 *    itself contain '#'),
 *  - the salt must fit in cursalt[] with room for a NUL terminator, and
 *  - the digest must be exactly BINARY_SIZE*2 hex digits.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	int pos, i;
	char *p;

	p = strrchr(ciphertext, '#'); // allow # in salt
	if (!p || p > &ciphertext[strlen(ciphertext)-1]) return 0;
	i = (int)(p - ciphertext);
	/* Was 'i > SALT_SIZE': a salt of exactly SALT_SIZE bytes left cursalt[]
	   with no NUL terminator, so crypt_all()'s strlen() read past the
	   buffer.  Require room for the terminator. */
	if (i >= SALT_SIZE) return 0;
	pos = i+1;
	if (strlen(ciphertext+pos) != BINARY_SIZE*2) return 0;
	for (i = pos; i < BINARY_SIZE*2+pos; i++)
	{
		/* hex digits only, either case */
		if (!( (('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) ||
		       (('a' <= ciphertext[i])&&(ciphertext[i] <= 'f'))
		       || (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F'))))
			return 0;
	}
	return 1;
}
/*
 * Canonicalize a ciphertext: copy it into a static buffer and lower-case
 * the hex digest (everything after the last '#').  The salt part is left
 * untouched.  Returns a pointer to static storage.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH + 1];

	strnzcpy(out, ciphertext, sizeof(out));
	strlwr(strrchr(out, '#'));
	return out;
}
/* Partial-hash lookups for the cracker's hash tables: the low 4..27 bits of
   the first word of the computed digest. */
static int get_hash_0(int index) { return crypt_key[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_key[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_key[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_key[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_key[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_key[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_key[index][0] & 0x7ffffff; }
/* Install the salt produced by salt(): always a full SALT_SIZE bytes, the
   actual salt text NUL-terminated and zero-padded inside. */
static void set_salt(void *salt)
{
	memcpy(cursalt, salt, SALT_SIZE);
}
/*
 * Prepare the HMAC pads for one candidate key:
 *   ipad = key XOR 0x36..,  opad = key XOR 0x5c..  (RFC 2104).
 * With the current constants (PLAINTEXT_LENGTH 125 <= PAD_SIZE 128) the
 * preprocessor branch compiles out, so saved_plain is never written here
 * and the long-key (pre-hash) path is absent; get_key() reconstructs the
 * plaintext from ipad instead.
 */
static void set_key(char *key, int index)
{
	int len;
	int i;

	len = strlen(key);
	/* reset pads to their XOR constants for the full block */
	memset(ipad[index], 0x36, PAD_SIZE);
	memset(opad[index], 0x5C, PAD_SIZE);
#if PLAINTEXT_LENGTH > PAD_SIZE
	memcpy(saved_plain[index], key, len);
	saved_plain[index][len] = 0;
	if (len > PAD_SIZE) {
		/* keys longer than one block are first reduced to SHA512(key) */
		SHA512_CTX ctx;
		unsigned char k0[BINARY_SIZE];

		SHA512_Init( &ctx );
		SHA512_Update( &ctx, key, len);
		SHA512_Final( k0, &ctx);

		len = BINARY_SIZE;

		for(i=0;i<len;i++)
		{
			ipad[index][i] ^= k0[i];
			opad[index][i] ^= k0[i];
		}
	}
	else
#endif /* PLAINTEXT_LENGTH > PAD_SIZE */
	for(i=0;i<len;i++)
	{
		ipad[index][i] ^= key[i];
		opad[index][i] ^= key[i];
	}
}
/*
 * Return the candidate plaintext for 'index'.  In the active (#else)
 * branch the key is recovered by undoing the XOR with 0x36 in ipad; bytes
 * beyond the key's true length become 0x36^0x36 == 0, so the string is
 * naturally NUL-terminated at the right place.
 */
static char *get_key(int index)
{
#if PLAINTEXT_LENGTH > PAD_SIZE
	return saved_plain[index];
#else
	unsigned int i;

	for(i=0;i<PLAINTEXT_LENGTH;i++)
		saved_plain[index][i] = ipad[index][ i ] ^ 0x36;
	saved_plain[index][i] = 0;
	return (char*) saved_plain[index];
#endif
}
/*
 * Return nonzero if any computed digest in the batch matches the target.
 * Without OpenMP the batch size is 1 (MAX_KEYS_PER_CRYPT), so the
 * #ifdef'd-out loop header leaves a single check of index 0, which is
 * exactly the whole batch.
 */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_key[index], BINARY_SIZE))
			return 1;
	return 0;
}
/* cmp_one() already compared the full BINARY_SIZE digest, so there is
   nothing further to verify here. */
static int cmp_exact(char *source, int count)
{
	return 1;
}
/* Exact (full-digest) comparison of one candidate against the target. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
/*
 * Compute HMAC-SHA512 for every queued candidate:
 *   inner = SHA512(ipad-block || salt),  result = SHA512(opad-block || inner)
 * per RFC 2104; ipad/opad were prepared in set_key() and cursalt is
 * NUL-terminated text.  The inner digest is staged in crypt_key[index] and
 * then overwritten by the outer digest.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		SHA512_CTX ctx;

		/* inner hash */
		SHA512_Init( &ctx );
		SHA512_Update( &ctx, ipad[index], PAD_SIZE );
		SHA512_Update( &ctx, cursalt, strlen( (char*) cursalt) );
		SHA512_Final( (unsigned char*) crypt_key[index], &ctx);
		/* outer hash */
		SHA512_Init( &ctx );
		SHA512_Update( &ctx, opad[index], PAD_SIZE );
		SHA512_Update( &ctx, crypt_key[index], BINARY_SIZE);
		SHA512_Final( (unsigned char*) crypt_key[index], &ctx);
	}
	return count;
}
/*
 * Decode the hex digest (after the LAST '#') into a static, word-aligned
 * byte buffer.  The backward scan starts at the NUL terminator and walks
 * to the final '#', so '#' characters inside the salt are skipped.
 * valid() guarantees a '#' exists and the digest length is correct.
 */
static void *binary(char *ciphertext)
{
	static union toalign {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD_32 a[1];
	} a;
	unsigned char *realcipher = a.c;
	int i,pos;

	for(i=strlen(ciphertext);ciphertext[i]!='#';i--); // allow # in salt
	pos=i+1;
	for(i=0;i<BINARY_SIZE;i++)
		realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2+pos])]*16 + atoi16[ARCH_INDEX(ciphertext[i*2+1+pos])];
	return (void*)realcipher;
}
/*
 * Extract the salt (everything before the LAST '#') into a static,
 * zero-padded SALT_SIZE buffer.  The memset guarantees NUL termination for
 * any salt shorter than SALT_SIZE, which crypt_all() relies on via
 * strlen().  valid() guarantees the '#' exists and the salt fits.
 */
static void *salt(char *ciphertext)
{
	static unsigned char salt[SALT_SIZE];

	memset(salt, 0, sizeof(salt));
	// allow # in salt
	memcpy(salt, ciphertext, strrchr(ciphertext, '#') - ciphertext);
	return salt;
}
/*
 * Format descriptor handed to the john core (registered via the
 * FMT_REGISTERS_H stanza at the top of the file).
 */
struct fmt_main fmt_hmacSHA512 = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },	/* presumably the tunable-cost fields added in v12 -- confirm */
#endif
		tests
	}, { /* methods */
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		binary,
		salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{ /* binary_hash[] (lookup-table siblings of get_hash[]) */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{ /* get_hash[] */
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
DRB003-antidep2-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A two-level loop nest with loop carried anti-dependence on the outer level.
Data race pair: a[i][j]@67:7 vs. a[i+1][j]@67:18
*/
#include <stdio.h>
#include <stdlib.h>
/*
 * DataRaceBench DRB003 (antidep2, "yes" = race expected): initializes a
 * 20x20 matrix, runs the anti-dependence kernel a[i][j] += a[i+1][j],
 * then prints it.  This is a *benchmark for race detectors* -- any race
 * here is intentional; do not "fix" the parallelization.
 */
int main(int argc,char *argv[])
{
  int i, j;
  int len = 20;
  double a[20][20];

  /* a[i][j] = i*len + j + 0.5, via nested parallel loops (each element is
     written exactly once, so this phase is race-free). */
#pragma omp parallel for private(j)
  for (i=0; i< len; i++)
#pragma omp parallel for simd
    for (j=0; j<len; j++)
      a[i][j] = (i * len + j + 0.5);

  /* Anti-dependence kernel.  NOTE(review): the file header documents a
     race pair a[i][j] vs a[i+1][j] carried by the *outer* loop, but in
     this variant the outer i-loop is serial and only the j-loop is
     parallel, which carries no such dependence -- confirm against the
     upstream DRB003 source whether the outer loop was meant to be the
     parallel one. */
  for (i = 0; i < len - 1; i += 1) {
#pragma omp parallel for simd
    for (j = 0; j < len ; j += 1) {
      a[i][j] += a[i + 1][j];
    }
  }

  /* Ordered parallel print of every element, then a single checksum value. */
#pragma omp parallel for private(j) ordered
  for (i=0; i< len; i++)
#pragma omp parallel for simd ordered
    for (j=0; j<len; j++)
#pragma omp ordered simd
      printf("%lf",a[i][j]);

  printf ("a[10][10]=%f\n", a[10][10]);
  return 0;
}
|
solucao_omp_combinado.c | /******************************************************************************
* OpenMP Example - Combined Parallel Loop Work-sharing - C/C++ Version
* FILE: omp_workshare4.c
* DESCRIPTION:
* This is a corrected version of the omp_workshare3.c example. Corrections
* include removing all statements between the parallel for construct and
* the actual for loop, and introducing logic to preserve the ability to
* query a thread's id and print it from inside the for loop.
* SOURCE: Blaise Barney 5/99
* LAST REVISED:
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#define N 50
#define CHUNK 5
/*
 * Vector add c = a + b using a combined parallel-for work-sharing
 * construct.  first_time is firstprivate, so each thread gets its own
 * copy initialized to 'y' and records its thread id exactly once, on the
 * first iteration it executes; tid is private and is set before it is
 * printed on every iteration.
 *
 * Fixes: implicit-int 'main ()' (invalid since C99) and the missing
 * return value; printf's prototype comes from the <stdio.h> include.
 */
int main(void) {
  int i, n, chunk, tid;
  float a[N], b[N], c[N];
  char first_time;

  /* Some initializations */
  for (i=0; i < N; i++)
    a[i] = b[i] = i * 1.0;
  n = N;
  chunk = CHUNK;
  first_time = 'y';

#pragma omp parallel for \
  shared(a,b,c) \
  private(i,tid) \
  schedule(static,chunk) \
  firstprivate(first_time)
  for (i=0; i < n; i++)
  {
    if (first_time == 'y')
    {
      tid = omp_get_thread_num();
      first_time = 'n';
    }
    c[i] = a[i] + b[i];
    printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
  }
  return 0;
}
|
pooling_hcl_arm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "pooling_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "utility/float.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <arm_neon.h>
#define POOL_GENERIC 0
#define POOL_K2S2 1
#define POOL_K3S2 2
#define POOL_K3S1 3
typedef void (*pooling_kernel_t)(const void* input, void* output, int inc, int inh, int inw, int outh, int outw, int,
int, int, int, int, int, int pad_h1, int pad_w1, int);
/*
 * 2x2 stride-2 average pooling over one NCHW float tensor, NEON-vectorized.
 * Interior outputs are produced 4 at a time (8 input columns, 2 rows) with
 * pairwise adds; pad_h1/pad_w1 flag a truncated window (one extra row /
 * column of output fed by a 1-wide or 1-tall window), averaged over the
 * elements actually present (divisor 2 at an edge, 1 at the corner).
 * k_h..pad_w0 and is_caffe are unused: the kernel shape is hard-wired.
 */
static void avg_2x2s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                      int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;

    /* peel the truncated edge column/row off the main loops */
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }

    int block_w = outw >> 2;              /* 4-output vector groups per row */
    int remain_w = inw - outw * 2;        /* input columns left after the row */

    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        const float* line1 = line0 + inw;
        float* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            /* vector part: 8 input columns -> 4 outputs */
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t sum0 = vaddq_f32(p00, p10);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
                float32x4_t sum1 = vaddq_f32(p01, p11);
#ifdef __aarch64__
                sum0 = vpaddq_f32(sum0, sum1);
#else
                /* armv7 has no vpaddq; emulate with two 64-bit pairwise adds */
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(sum0), vget_high_f32(sum0));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(sum1), vget_high_f32(sum1));
                sum0 = vcombine_f32(sum0_1, sum0_2);
#endif
                sum0 = vmulq_n_f32(sum0, 0.25f);   /* /4: full 2x2 window */
                vst1q_f32(out_ptr, sum0);
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            /* scalar tail of the row */
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                float32x2_t p2 = vld1_f32(line1);
                float32x2_t sum = vadd_f32(p1, p2);
                *out_ptr = (sum[0] + sum[1]) * 0.25f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            /* right edge: 1x2 window, average of 2 */
            if (pad_w1)
            {
                *out_ptr = (line0[0] + line1[0]) * 0.5f;
                out_ptr++;
            }
            line0 += remain_w + inw;   /* skip to the next row pair */
            line1 += remain_w + inw;
        }
        /* bottom edge: single input row, 2x1 windows (average of 2) */
        if (pad_h1)
        {
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
                p00 = vpaddq_f32(p00, p01);
#else
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(p00), vget_high_f32(p00));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(p01), vget_high_f32(p01));
                p00 = vcombine_f32(sum0_1, sum0_2);
#endif
                p00 = vmulq_n_f32(p00, 0.5f);
                vst1q_f32(out_ptr, p00);
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                *out_ptr = (p1[0] + p1[1]) * 0.5f;
                out_ptr++;
                line0 += 2;
            }
            /* bottom-right corner: single element */
            if (pad_w1)
            {
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}
/*
 * 2x2 stride-2 max pooling over one NCHW float tensor, NEON-vectorized.
 * Same traversal as avg_2x2s2: 4 outputs per vector iteration via
 * element-wise then pairwise max; truncated edge windows (pad_w1/pad_h1)
 * take the max over the elements present.  k_h..pad_w0 and is_caffe are
 * unused (kernel shape hard-wired).
 */
static void max_2x2s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                      int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;

    /* peel the truncated edge column/row off the main loops */
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }

    int block_w = outw >> 2;          /* 4-output vector groups per row */
    int remain_w = inw - outw * 2;    /* input columns left after the row */

    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        const float* line1 = line0 + inw;
        float* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
#ifdef __aarch64__
                float32x4_t max0 = vmaxq_f32(p00, p10);   /* vertical max */
                float32x4_t max1 = vmaxq_f32(p01, p11);
                /* pairwaise max */
                float32x4_t _max = vpmaxq_f32(max0, max1);
#else
                /* armv7: pairwise max in 64-bit halves, then recombine */
                float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_low_f32(p10));
                float32x2_t max0_2 = vpmax_f32(vget_high_f32(p00), vget_high_f32(p10));
                max0_1 = vpmax_f32(max0_1, max0_2);
                float32x2_t max1_1 = vpmax_f32(vget_low_f32(p01), vget_low_f32(p11));
                float32x2_t max1_2 = vpmax_f32(vget_high_f32(p01), vget_high_f32(p11));
                max1_1 = vpmax_f32(max1_1, max1_2);
                float32x4_t _max = vcombine_f32(max0_1, max1_1);
#endif
                vst1q_f32(out_ptr, _max);
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            /* scalar tail of the row */
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                float32x2_t p2 = vld1_f32(line1);
                float32x2_t _max = vmax_f32(p1, p2);
                *out_ptr = fmax(_max[0], _max[1]);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            /* right edge: 1x2 window */
            if (pad_w1 > 0)
            {
                *out_ptr = fmax(line0[0], line1[0]);
                out_ptr++;
            }
            line0 += remain_w + inw;   /* skip to the next row pair */
            line1 += remain_w + inw;
        }
        /* bottom edge: single input row, 2x1 windows */
        if (pad_h1 > 0)
        {
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
                p00 = vpmaxq_f32(p00, p01);
#else
                float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_high_f32(p00));
                float32x2_t max0_2 = vpmax_f32(vget_low_f32(p01), vget_high_f32(p01));
                p00 = vcombine_f32(max0_1, max0_2);
#endif
                vst1q_f32(out_ptr, p00);
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                *out_ptr = fmax(p1[0], p1[1]);
                out_ptr++;
                line0 += 2;
            }
            /* bottom-right corner: single element */
            if (pad_w1 > 0)
            {
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}
/*
 * 3x3 stride-2 average pooling over one NCHW float tensor, NEON-vectorized.
 * Because windows overlap (kernel 3, stride 2), each row is loaded with
 * vld2q_f32, which de-interleaves into even columns (val[0]) and odd
 * columns (val[1]); the third column of each window (even column + 2) is
 * built with vextq_f32 against the next de-interleaved load.  Truncated
 * edge windows (pad_w1/pad_h1 of 1 or 2) are averaged over the elements
 * actually present (divisors 1/9, 1/6, 1/4, 1/3, 1/2, 1).  k_h..pad_w0
 * and is_caffe are unused (kernel shape hard-wired).
 */
static void avg_3x3s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                      int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;

    /* peel the truncated edge column/row off the main loops */
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }

    int block_w = outw >> 2;          /* 4-output vector groups per row */
    int remain_w = inw - outw * 2;    /* input columns left after the row */

    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        const float* line1 = line0 + inw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            float32x4x2_t p20 = vld2q_f32(line2);
            for (int j = 0; j < block_w; j++)
            {
                /* per row: even + odd + (even shifted by one window) */
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);

                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                sum1 = vaddq_f32(sum1, p11);

                float32x4x2_t p20_new = vld2q_f32(line2 + 8);
                float32x4_t sum2 = vaddq_f32(p20.val[0], p20.val[1]);
                float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
                sum2 = vaddq_f32(sum2, p21);

                sum0 = vaddq_f32(vaddq_f32(sum0, sum1), sum2);
                sum0 = vmulq_n_f32(sum0, 0.11111111f);   /* /9: full 3x3 */
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 8;
                line1 += 8;
                line2 += 8;
                out_ptr += 4;
            }
            /* scalar tail of the row */
            for (int j = block_w * 4; j < outw; j++)
            {
                *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.11111111f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            /* right edge: 2-wide window, 3 rows -> /6 */
            if (pad_w1 == 1)
            {
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f;
                out_ptr++;
            }
            line0 += remain_w + inw;   /* skip to the next row pair */
            line1 += remain_w + inw;
            line2 += remain_w + inw;
        }
        /* bottom edge with 2 input rows left: 3x2 windows -> /6 */
        if (pad_h1 == 1)
        {
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);

                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                sum1 = vaddq_f32(sum1, p11);

                sum0 = vaddq_f32(sum0, sum1);
                sum0 = vmulq_n_f32(sum0, 0.16666667f);
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                p10 = p10_new;
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.16666667f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if (pad_w1 == 1)       /* 2x2 corner -> /4 */
            {
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
                out_ptr++;
            }
            else if (pad_w1 == 2)  /* 1x2 corner -> /2 */
            {
                *out_ptr = (line0[0] + line1[0]) * 0.5f;
                out_ptr++;
            }
        }
        /* bottom edge with 1 input row left: 3x1 windows -> /3 */
        else if (pad_h1 == 2)
        {
            float32x4x2_t p00 = vld2q_f32(line0);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);
                sum0 = vmulq_n_f32(sum0, 0.3333333f);
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                *out_ptr = (line0[0] + line0[1] + line0[2]) * 0.3333333f;
                out_ptr++;
                line0 += 2;
            }
            if (pad_w1 == 1)       /* 2x1 corner -> /2 */
            {
                *out_ptr = (line0[0] + line0[1]) * 0.5f;
                out_ptr++;
            }
            else if (pad_w1 == 2)  /* single element */
            {
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}
/*
 * 3x3 stride-2 max pooling over one NCHW float tensor, NEON-vectorized.
 * Same overlapped-window scheme as avg_3x3s2: vld2q_f32 splits each row
 * into even/odd columns and vextq_f32 supplies the third column of each
 * window from the next load.  Truncated edges take the max of whatever
 * elements are present.  NOTE(review): unlike avg_3x3s2, only the
 * pad_w1 == 1 / pad_h1 == 1 truncation cases are handled here -- confirm
 * the dispatcher never selects this kernel with a padding remainder of 2.
 * k_h..pad_w0 and is_caffe are unused (kernel shape hard-wired).
 */
static void max_3x3s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                      int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;

    /* peel the truncated edge column/row off the main loops */
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }

    int block_w = outw >> 2;          /* 4-output vector groups per row */
    int remain_w = inw - outw * 2;    /* input columns left after the row */

    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        const float* line1 = line0 + inw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            float32x4x2_t p20 = vld2q_f32(line2);
            for (int j = 0; j < block_w; j++)
            {
                /* worked example (monotone data):
                   p00        = [1,2,3,4,5,6,7,8]
                   p00.val[0] = [1,3,5,7]  (even cols), val[1] = [2,4,6,8]
                   max0       = [2,4,6,8]
                   p00_new    = [9,10,11,12,13,14,15,16]
                   p01        = [3,5,7,9]  (even cols shifted by one window)
                   max0       = max(max0,p01) = [3,5,7,9]
                */
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);

                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_f32(max1, p11);

                float32x4x2_t p20_new = vld2q_f32(line2 + 8);
                float32x4_t max2 = vmaxq_f32(p20.val[0], p20.val[1]);
                float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
                max2 = vmaxq_f32(max2, p21);

                max0 = vmaxq_f32(vmaxq_f32(max0, max1), max2);
                vst1q_f32(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 8;
                line1 += 8;
                line2 += 8;
                out_ptr += 4;
            }
            /* scalar tail of the row */
            for (int j = block_w * 4; j < outw; j++)
            {
                float max0 = fmax(fmax(line0[0], line0[1]), line0[2]);
                float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
                float max2 = fmax(fmax(line2[0], line2[1]), line2[2]);
                *out_ptr = fmax(fmax(max0, max1), max2);
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            /* right edge: 2-wide, 3-tall window */
            if (pad_w1 == 1)
            {
                float max0 = fmax(fmax(line0[0], line0[1]), fmax(line1[0], line1[1]));
                *out_ptr = fmax(fmax(line2[0], line2[1]), max0);
                out_ptr++;
            }
            line0 += remain_w + inw;   /* skip to the next row pair */
            line1 += remain_w + inw;
            line2 += remain_w + inw;
        }
        /* bottom edge: 2 input rows left, 3x2 windows */
        if (pad_h1 == 1)
        {
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);

                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_f32(max1, p11);

                vst1q_f32(out_ptr, vmaxq_f32(max0, max1));
                p00 = p00_new;
                p10 = p10_new;
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                float max0 = fmax(fmax(line0[0], line0[1]), line0[2]);
                float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
                *out_ptr = fmax(max0, max1);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            /* bottom-right corner: 2x2 */
            if (pad_w1 == 1)
            {
                *out_ptr = fmax(fmax(line0[0], line0[1]), fmax(line1[0], line1[1]));
                out_ptr++;
            }
        }
    }
}
/* 2x2 average pooling, stride 2, pad 1 on all four borders (NEON-accelerated).
   Tensors are CHW float; each channel is independent.  With pad 1 the first
   output row/column comes from a partial window (a single valid input value),
   and when inw/inh are even the last output row/column is partial too, so
   outw/outh are decremented here and those borders are handled separately.
   is_caffe != 0 selects the Caffe flavor, which scales partial windows by the
   full kernel size (1/4) instead of the valid-element count.
   k_h..pad_w1 are unused: the kernel shape is hard-coded, but the signature
   must match the common pooling_kernel_t type.  */
static void avg_2x2s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
int in_hw = inw * inh;
int out_hw = outh * outw;
/* Even input dims: the last (partial) output column/row is emitted by the
   dedicated border code below, so drop it from the main loop counts.  */
if (inw % 2 == 0)
outw--;
if (inh % 2 == 0)
outh--;
/* block_w: number of 4-output NEON iterations over the interior columns
   (the first output of each row is produced by the border code).  */
int block_w = (outw - 1) >> 2;
/* remain_w: pointer advance to skip from the end of a pooled row to the
   start of the next input row.  */
int remain_w = inw - outw * 2 + 1;
for (int c = 0; c < inc; c++)
{
const float* line00 = input + c * in_hw;
float* out_ptr = output + c * out_hw;
// h begin
/* Top-left corner: only one valid input in the 2x2 window.  */
if (is_caffe == 0)
*out_ptr = line00[0];
else
*out_ptr = line00[0] * 0.25f;
out_ptr++;
line00++;
/* Top row: each window has 2 valid inputs (padded row contributes none).
   vpadd folds horizontal pairs, i.e. sums each 2-wide window.  */
for (int j = 0; j < block_w; j++)
{
float32x4_t p00 = vld1q_f32(line00);
float32x4_t p01 = vld1q_f32(line00 + 4);
#ifdef __aarch64__
float32x4_t sum0 = vpaddq_f32(p00, p01);
#else
float32x2_t sum0_1 = vpadd_f32(vget_low_f32(p00), vget_high_f32(p00));
float32x2_t sum0_2 = vpadd_f32(vget_low_f32(p01), vget_high_f32(p01));
float32x4_t sum0 = vcombine_f32(sum0_1, sum0_2);
#endif
if (is_caffe == 0)
sum0 = vmulq_n_f32(sum0, 0.5f);
else
sum0 = vmulq_n_f32(sum0, 0.25f);
vst1q_f32(out_ptr, sum0);
line00 += 8;
out_ptr += 4;
}
/* Scalar tail of the top row (starts at +1: corner already emitted).  */
for (int j = block_w * 4 + 1; j < outw; j++)
{
if (is_caffe == 0)
*out_ptr = (line00[0] + line00[1]) * 0.5f;
else
*out_ptr = (line00[0] + line00[1]) * 0.25f;
out_ptr++;
line00 += 2;
}
/* Even width: extra partial column on the right (1 valid input).  */
if (inw % 2 == 0)
{
if (is_caffe == 0)
*out_ptr = line00[0];
else
*out_ptr = line00[0] * 0.25f;
out_ptr++;
}
line00 += remain_w;
// h center
const float* line0 = line00;
const float* line1 = line0 + inw;
/* Interior rows: full two-row windows.  */
for (int i = 1; i < outh; i++)
{
// w begin
/* Left edge: 2 valid inputs (one per row).  */
if (is_caffe == 0)
*out_ptr = (line0[0] + line1[0]) * 0.5f;
else
*out_ptr = (line0[0] + line1[0]) * 0.25f;
out_ptr++;
line0++;
line1++;
// w center
for (int j = 0; j < block_w; j++)
{
float32x4_t p00 = vld1q_f32(line0);
float32x4_t p10 = vld1q_f32(line1);
float32x4_t sum0 = vaddq_f32(p00, p10);
float32x4_t p01 = vld1q_f32(line0 + 4);
float32x4_t p11 = vld1q_f32(line1 + 4);
float32x4_t sum1 = vaddq_f32(p01, p11);
#ifdef __aarch64__
float32x4_t _sum = vpaddq_f32(sum0, sum1);
#else
float32x2_t sum0_1 = vpadd_f32(vget_low_f32(sum0), vget_high_f32(sum0));
float32x2_t sum0_2 = vpadd_f32(vget_low_f32(sum1), vget_high_f32(sum1));
float32x4_t _sum = vcombine_f32(sum0_1, sum0_2);
#endif
/* Full 2x2 window: always 1/4, regardless of flavor.  */
_sum = vmulq_n_f32(_sum, 0.25f);
vst1q_f32(out_ptr, _sum);
out_ptr += 4;
line0 += 8;
line1 += 8;
}
for (int j = block_w * 4 + 1; j < outw; j++)
{
*out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
out_ptr++;
line0 += 2;
line1 += 2;
}
// w end
if (inw % 2 == 0)
{
if (is_caffe == 0)
*out_ptr = (line0[0] + line1[0]) * 0.5f;
else
*out_ptr = (line0[0] + line1[0]) * 0.25f;
out_ptr++;
}
/* Skip the second row of this window pair plus the row remainder.  */
line0 += remain_w + inw;
line1 += remain_w + inw;
}
// h end
/* Even height: extra partial row at the bottom (mirror of the top row).  */
if (inh % 2 == 0)
{
if (is_caffe == 0)
*out_ptr = line0[0];
else
*out_ptr = line0[0] * 0.25f;
out_ptr++;
line0++;
for (int j = 0; j < block_w; j++)
{
float32x4_t p00 = vld1q_f32(line0);
float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
float32x4_t _sum = vpaddq_f32(p00, p01);
#else
float32x2_t sum0_1 = vpadd_f32(vget_low_f32(p00), vget_high_f32(p00));
float32x2_t sum0_2 = vpadd_f32(vget_low_f32(p01), vget_high_f32(p01));
float32x4_t _sum = vcombine_f32(sum0_1, sum0_2);
#endif
if (is_caffe == 0)
_sum = vmulq_n_f32(_sum, 0.5f);
else
_sum = vmulq_n_f32(_sum, 0.25f);
vst1q_f32(out_ptr, _sum);
out_ptr += 4;
line0 += 8;
}
for (int j = block_w * 4 + 1; j < outw; j++)
{
if (is_caffe == 0)
*out_ptr = (line0[0] + line0[1]) * 0.5f;
else
*out_ptr = (line0[0] + line0[1]) * 0.25f;
out_ptr++;
line0 += 2;
}
/* Bottom-right corner when both dims are even: 1 valid input.  */
if (inw % 2 == 0)
{
if (is_caffe == 0)
*out_ptr = line0[0];
else
*out_ptr = line0[0] * 0.25f;
}
}
}
}
/* 2x2 max pooling, stride 2, pad 1 on all four borders (NEON-accelerated).
   Tensors are CHW float.  Padding contributes nothing to a max, so border
   windows simply take the max over their valid elements.  Structure mirrors
   avg_2x2s2_p1: outw/outh are decremented for even input dims and the last
   partial row/column is handled by dedicated border code.
   k_h..is_caffe are unused (kernel shape hard-coded; common pooling_kernel_t
   signature).  */
static void max_2x2s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
int in_hw = inw * inh;
int out_hw = outh * outw;
/* Even input dims: last (partial) output column/row emitted separately.  */
if (inw % 2 == 0)
outw--;
if (inh % 2 == 0)
outh--;
/* 4-output NEON iterations over interior columns; remain_w advances from
   the end of a pooled row to the start of the next input row.  */
int block_w = (outw - 1) >> 2;
int remain_w = inw - outw * 2 + 1;
for (int c = 0; c < inc; c++)
{
const float* line00 = input + c * in_hw;
float* out_ptr = output + c * out_hw;
// h begin
/* Top-left corner: single valid input.  */
*out_ptr = line00[0];
out_ptr++;
line00++;
/* Top row: vpmax takes the max of each horizontal pair, i.e. each 2-wide
   window of the single valid row.  */
for (int j = 0; j < block_w; j++)
{
float32x4_t p00 = vld1q_f32(line00);
float32x4_t p01 = vld1q_f32(line00 + 4);
#ifdef __aarch64__
float32x4_t _max = vpmaxq_f32(p00, p01);
#else
float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_high_f32(p00));
float32x2_t max0_2 = vpmax_f32(vget_low_f32(p01), vget_high_f32(p01));
float32x4_t _max = vcombine_f32(max0_1, max0_2);
#endif
vst1q_f32(out_ptr, _max);
out_ptr += 4;
line00 += 8;
}
/* Scalar tail (starts at +1: corner already emitted).  */
for (int j = block_w * 4 + 1; j < outw; j++)
{
*out_ptr = fmax(line00[0], line00[1]);
out_ptr++;
line00 += 2;
}
if (inw % 2 == 0)
{
*out_ptr = line00[0];
out_ptr++;
}
line00 += remain_w;
// h center
const float* line0 = line00;
const float* line1 = line0 + inw;
/* Interior rows: full two-row windows.  */
for (int i = 1; i < outh; i++)
{
// w begin
*out_ptr = fmax(line0[0], line1[0]);
out_ptr++;
line0++;
line1++;
// w center
for (int j = 0; j < block_w; j++)
{
float32x4_t p00 = vld1q_f32(line0);
float32x4_t p10 = vld1q_f32(line1);
float32x4_t p01 = vld1q_f32(line0 + 4);
float32x4_t p11 = vld1q_f32(line1 + 4);
#ifdef __aarch64__
/* Vertical max of the two rows, then pairwise max across columns.  */
float32x4_t max0 = vmaxq_f32(p00, p10);
float32x4_t max1 = vmaxq_f32(p01, p11);
float32x4_t _max = vpmaxq_f32(max0, max1);
#else
float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_low_f32(p10));
float32x2_t max0_2 = vpmax_f32(vget_high_f32(p00), vget_high_f32(p10));
max0_1 = vpmax_f32(max0_1, max0_2);
float32x2_t max1_1 = vpmax_f32(vget_low_f32(p01), vget_low_f32(p11));
float32x2_t max1_2 = vpmax_f32(vget_high_f32(p01), vget_high_f32(p11));
max1_1 = vpmax_f32(max1_1, max1_2);
float32x4_t _max = vcombine_f32(max0_1, max1_1);
#endif
vst1q_f32(out_ptr, _max);
out_ptr += 4;
line0 += 8;
line1 += 8;
}
for (int j = block_w * 4 + 1; j < outw; j++)
{
float32x2_t p1 = vld1_f32(line0);
float32x2_t p2 = vld1_f32(line1);
float32x2_t _max = vmax_f32(p1, p2);
/* NOTE: _max[lane] indexing is a GCC/Clang vector extension.  */
*out_ptr = fmax(_max[0], _max[1]);
out_ptr++;
line0 += 2;
line1 += 2;
}
// w end
if (inw % 2 == 0)
{
*out_ptr = fmax(line0[0], line1[0]);
out_ptr++;
}
/* Skip the second row of this window pair plus the row remainder.  */
line0 += remain_w + inw;
line1 += remain_w + inw;
}
// h end
/* Even height: extra partial row at the bottom (mirror of the top).  */
if (inh % 2 == 0)
{
*out_ptr = line0[0];
out_ptr++;
line0++;
for (int j = 0; j < block_w; j++)
{
float32x4_t p00 = vld1q_f32(line0);
float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
float32x4_t _max = vpmaxq_f32(p00, p01);
#else
float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_high_f32(p00));
float32x2_t max0_2 = vpmax_f32(vget_low_f32(p01), vget_high_f32(p01));
float32x4_t _max = vcombine_f32(max0_1, max0_2);
#endif
vst1q_f32(out_ptr, _max);
out_ptr += 4;
line0 += 8;
}
for (int j = block_w * 4 + 1; j < outw; j++)
{
*out_ptr = fmax(line0[0], line0[1]);
out_ptr++;
line0 += 2;
}
if (inw % 2 == 0)
{
*out_ptr = line0[0];
}
}
}
}
/* 3x3 max pooling, stride 2, pad 1 on all four borders (NEON-accelerated).
   Tensors are CHW float.  Padding contributes nothing to a max, so border
   windows take the max of their valid elements only.
   Vector scheme: vld2q_f32 deinterleaves 8 floats into even columns
   (val[0]) and odd columns (val[1]); vextq with the next even-column vector
   supplies the third tap of each stride-2 window, so 4 outputs are produced
   per iteration.
   For is_caffe the output is one larger in each dim (ceil rounding), hence
   the extra outw--/outh-- and the extra border outputs below.
   k_h..s_w, pad_* are unused (kernel shape hard-coded).  */
static void max_3x3s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
// TLOG_ERR("max_3x3s2_p1\n");
int in_hw = inw * inh;
int out_hw = outh * outw;
if (is_caffe == 1 || inw % 2 == 1)
outw--;
if (is_caffe == 1 || inh % 2 == 1)
outh--;
/* 4-output NEON iterations per row; remain_w advances from the end of a
   pooled row to the start of the next input row.  */
int block_w = (outw - 1) >> 2;
int remain_w = inw - outw * 2 + 1;
for (int c = 0; c < inc; c++)
{
const float* line1 = input + c * in_hw;
const float* line2 = line1 + inw;
float* out_ptr = output + c * out_hw;
// h begin ---------------------------------------
/* Top-left corner: 2x2 valid elements (top and left rows are padding).  */
*out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
out_ptr++;
line1 += 1;
line2 += 1;
float32x4x2_t p10 = vld2q_f32(line1);
float32x4x2_t p20 = vld2q_f32(line2);
for (int j = 0; j < block_w; j++)
{
float32x4x2_t p10_new = vld2q_f32(line1 + 8);
float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
max1 = vmaxq_f32(max1, p11);
float32x4x2_t p20_new = vld2q_f32(line2 + 8);
float32x4_t max2 = vmaxq_f32(p20.val[0], p20.val[1]);
float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
max2 = vmaxq_f32(max2, p21);
max1 = vmaxq_f32(max1, max2);
vst1q_f32(out_ptr, max1);
p10 = p10_new;
p20 = p20_new;
line1 += 8;
line2 += 8;
out_ptr += 4;
}
/* Scalar tail (starts at +1: corner already emitted).  */
for (int j = block_w * 4 + 1; j < outw; j++)
{
float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
float max2 = fmax(fmax(line2[0], line2[1]), line2[2]);
*out_ptr = fmax(max1, max2);
out_ptr++;
line1 += 2;
line2 += 2;
}
/* Right border: 2 or 1 valid columns depending on parity/flavor.  */
if (inw % 2 == 1)
{
*out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
out_ptr++;
}
else if (is_caffe == 1 && inw % 2 == 0)
{
*out_ptr = fmax(line1[0], line2[0]);
out_ptr++;
}
line1 += remain_w;
line2 += remain_w;
// h center ---------------------------------------
/* Shift the row window down: previous middle row becomes line0.  */
const float* line0 = line1;
line1 = line2;
line2 = line1 + inw;
for (int i = 1; i < outh; i++)
{
// left
float max0 = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
*out_ptr = fmax(fmax(line0[0], line0[1]), max0);
out_ptr++;
line0 += 1;
line1 += 1;
line2 += 1;
// mid
float32x4x2_t p00 = vld2q_f32(line0);
float32x4x2_t p10 = vld2q_f32(line1);
float32x4x2_t p20 = vld2q_f32(line2);
for (int j = 0; j < block_w; j++)
{
float32x4x2_t p00_new = vld2q_f32(line0 + 8);
float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
max0 = vmaxq_f32(max0, p01);
float32x4x2_t p10_new = vld2q_f32(line1 + 8);
float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
max1 = vmaxq_f32(max1, p11);
float32x4x2_t p20_new = vld2q_f32(line2 + 8);
float32x4_t max2 = vmaxq_f32(p20.val[0], p20.val[1]);
float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
max2 = vmaxq_f32(max2, p21);
max0 = vmaxq_f32(vmaxq_f32(max0, max1), max2);
vst1q_f32(out_ptr, max0);
p00 = p00_new;
p10 = p10_new;
p20 = p20_new;
line0 += 8;
line1 += 8;
line2 += 8;
out_ptr += 4;
}
for (int j = block_w * 4 + 1; j < outw; j++)
{
float max0 = fmax(fmax(line0[0], line0[1]), line0[2]);
float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
float max2 = fmax(fmax(line2[0], line2[1]), line2[2]);
*out_ptr = fmax(fmax(max0, max1), max2);
out_ptr++;
line0 += 2;
line1 += 2;
line2 += 2;
}
if (inw % 2 == 1)
{
max0 = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
*out_ptr = fmax(fmax(line0[0], line0[1]), max0);
out_ptr++;
}
else if (inw % 2 == 0 && is_caffe == 1)
{
*out_ptr = fmax(fmax(line0[0], line1[0]), line2[0]);
out_ptr++;
}
/* Advance two input rows (stride 2) plus row remainder.  */
line0 += inw + remain_w;
line1 += inw + remain_w;
line2 += inw + remain_w;
}
// h end ------------------------------------------
/* Bottom border: last window has only 2 valid rows (line0, line1), or a
   single row for the caffe even-height case.  */
if (inh % 2 == 1)
{
*out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line0[0], line0[1]));
out_ptr++;
line0 += 1;
line1 += 1;
float32x4x2_t p00 = vld2q_f32(line0);
float32x4x2_t p10 = vld2q_f32(line1);
for (int j = 0; j < block_w; j++)
{
float32x4x2_t p00_new = vld2q_f32(line0 + 8);
float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
max0 = vmaxq_f32(max0, p01);
float32x4x2_t p10_new = vld2q_f32(line1 + 8);
float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
max1 = vmaxq_f32(max1, p11);
max0 = vmaxq_f32(max0, max1);
vst1q_f32(out_ptr, max0);
p00 = p00_new;
p10 = p10_new;
line0 += 8;
line1 += 8;
out_ptr += 4;
}
for (int j = block_w * 4 + 1; j < outw; j++)
{
float max0 = fmax(fmax(line0[0], line0[1]), line0[2]);
float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
*out_ptr = fmax(max0, max1);
out_ptr++;
line0 += 2;
line1 += 2;
}
if (inw % 2 == 1)
{
*out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line0[0], line0[1]));
out_ptr++;
}
else if (inw % 2 == 0 && is_caffe == 1)
{
*out_ptr = fmax(line0[0], line1[0]);
out_ptr++;
}
}
else if (inh % 2 == 0 && is_caffe == 1)
{
/* Caffe, even height: final output row sees a single valid input row.  */
*out_ptr = fmax(line0[0], line0[1]);
out_ptr++;
line0 += 1;
float32x4x2_t p00 = vld2q_f32(line0);
for (int j = 0; j < block_w; j++)
{
float32x4x2_t p00_new = vld2q_f32(line0 + 8);
float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
max0 = vmaxq_f32(max0, p01);
vst1q_f32(out_ptr, max0);
p00 = p00_new;
line0 += 8;
out_ptr += 4;
}
for (int j = block_w * 4 + 1; j < outw; j++)
{
*out_ptr = fmax(fmax(line0[0], line0[1]), line0[2]);
out_ptr++;
line0 += 2;
}
if (inw % 2 == 1)
{
*out_ptr = fmax(line0[0], line0[1]);
out_ptr++;
}
else if (inw % 2 == 0)
{
*out_ptr = line0[0];
}
}
}
}
/* 3x3 average pooling, stride 2, pad 1 on all four borders (NEON).
   Tensors are CHW float.  Same vld2q/vextq stride-2 scheme as
   max_3x3s2_p1: val[0]/val[1] are the deinterleaved even/odd columns and
   vextq supplies the third tap, giving 4 outputs per vector iteration.
   is_caffe != 0 scales partial windows by 1/9 (full kernel) instead of the
   valid-element count; for caffe the output is one larger per dim, hence
   the extra outw--/outh-- and extra border outputs.
   FIX: the "h end" else-branch condition used `inw % 2 == 0` where the
   symmetric max_3x3s2_p1 tests `inh % 2 == 0`; with caffe flavor, even
   height and odd width the final output row was never written.  Now tests
   inh, matching the max kernel.
   NOTE(review): some caffe border cases scale 2-element columns by 1/6
   rather than 1/9 (e.g. the even-width right border) — verify against the
   reference implementation.
   k_h..s_w, pad_* are unused (kernel shape hard-coded).  */
static void avg_3x3s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
int in_hw = inw * inh;
int out_hw = outh * outw;
if (is_caffe == 1 || inw % 2 == 1)
outw--;
if (is_caffe == 1 || inh % 2 == 1)
outh--;
/* 4-output NEON iterations per row; remain_w advances from the end of a
   pooled row to the start of the next input row.  */
int block_w = (outw - 1) >> 2;
int remain_w = inw - outw * 2 + 1;
for (int c = 0; c < inc; c++)
{
const float* line1 = input + c * in_hw;
const float* line2 = line1 + inw;
float* out_ptr = output + c * out_hw;
// h begin ---------------------------------------
/* Top-left corner: 4 valid elements (2x2).  */
if (is_caffe == 0)
*out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.25f;
else
*out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
out_ptr++;
line1 += 1;
line2 += 1;
float32x4x2_t p10 = vld2q_f32(line1);
float32x4x2_t p20 = vld2q_f32(line2);
for (int j = 0; j < block_w; j++)
{
float32x4x2_t p10_new = vld2q_f32(line1 + 8);
float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
sum1 = vaddq_f32(sum1, p11);
float32x4x2_t p20_new = vld2q_f32(line2 + 8);
float32x4_t sum2 = vaddq_f32(p20.val[0], p20.val[1]);
float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
sum2 = vaddq_f32(sum2, p21);
sum1 = vaddq_f32(sum1, sum2);
/* Top row windows have 6 valid elements (top row is padding).  */
if (is_caffe == 0)
sum1 = vmulq_n_f32(sum1, 0.16666667f);
else
sum1 = vmulq_n_f32(sum1, 0.11111111f);
vst1q_f32(out_ptr, sum1);
p10 = p10_new;
p20 = p20_new;
line1 += 8;
line2 += 8;
out_ptr += 4;
}
/* Scalar tail (starts at +1: corner already emitted).  */
for (int j = block_w * 4 + 1; j < outw; j++)
{
if (is_caffe == 0)
*out_ptr = (line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.16666667f;
else
*out_ptr = (line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.11111111f;
out_ptr++;
line1 += 2;
line2 += 2;
}
if (inw % 2 == 1)
{
if (is_caffe == 0)
*out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.25f;
else
*out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
out_ptr++;
}
else if (inw % 2 == 0 && is_caffe == 1)
{
*out_ptr = (line1[0] + line2[0]) * 0.16666667f;
out_ptr++;
}
line1 += remain_w;
line2 += remain_w;
// h center ---------------------------------------
/* Shift the row window down: previous middle row becomes line0.  */
const float* line0 = line1;
line1 = line2;
line2 = line1 + inw;
for (int i = 1; i < outh; i++)
{
// left
if (is_caffe == 0)
*out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f;
else
*out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
out_ptr++;
line0 += 1;
line1 += 1;
line2 += 1;
// mid
float32x4x2_t p00 = vld2q_f32(line0);
float32x4x2_t p10 = vld2q_f32(line1);
float32x4x2_t p20 = vld2q_f32(line2);
for (int j = 0; j < block_w; j++)
{
float32x4x2_t p00_new = vld2q_f32(line0 + 8);
float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
sum0 = vaddq_f32(sum0, p01);
float32x4x2_t p10_new = vld2q_f32(line1 + 8);
float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
sum1 = vaddq_f32(sum1, p11);
float32x4x2_t p20_new = vld2q_f32(line2 + 8);
float32x4_t sum2 = vaddq_f32(p20.val[0], p20.val[1]);
float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
sum2 = vaddq_f32(sum2, p21);
sum0 = vaddq_f32(vaddq_f32(sum0, sum1), sum2);
/* Full 3x3 window: always 1/9.  */
sum0 = vmulq_n_f32(sum0, 0.11111111f);
vst1q_f32(out_ptr, sum0);
p00 = p00_new;
p10 = p10_new;
p20 = p20_new;
line0 += 8;
line1 += 8;
line2 += 8;
out_ptr += 4;
}
for (int j = block_w * 4 + 1; j < outw; j++)
{
*out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.11111111f;
out_ptr++;
line0 += 2;
line1 += 2;
line2 += 2;
}
// end
if (inw % 2 == 1)
{
if (is_caffe == 0)
*out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f;
else
*out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
out_ptr++;
}
else if (inw % 2 == 0 && is_caffe == 1)
{
*out_ptr = (line0[0] + line1[0] + line2[0]) * 0.16666667f;
out_ptr++;
}
/* Advance two input rows (stride 2) plus row remainder.  */
line0 += remain_w + inw;
line1 += remain_w + inw;
line2 += remain_w + inw;
}
// h end-------------------------------
/* Bottom border: last window has only 2 valid rows, or a single row for
   the caffe even-height case.  */
if (inh % 2 == 1)
{
if (is_caffe == 0)
*out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
else
*out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.11111111f;
out_ptr++;
line0 += 1;
line1 += 1;
float32x4x2_t p00 = vld2q_f32(line0);
float32x4x2_t p10 = vld2q_f32(line1);
for (int j = 0; j < block_w; j++)
{
float32x4x2_t p00_new = vld2q_f32(line0 + 8);
float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
sum0 = vaddq_f32(sum0, p01);
float32x4x2_t p10_new = vld2q_f32(line1 + 8);
float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
sum1 = vaddq_f32(sum1, p11);
sum0 = vaddq_f32(sum0, sum1);
if (is_caffe == 0)
sum0 = vmulq_n_f32(sum0, 0.16666667f);
else
sum0 = vmulq_n_f32(sum0, 0.11111111f);
vst1q_f32(out_ptr, sum0);
p00 = p00_new;
p10 = p10_new;
line0 += 8;
line1 += 8;
out_ptr += 4;
}
for (int j = block_w * 4 + 1; j < outw; j++)
{
if (is_caffe == 0)
*out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.16666667f;
else
*out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.11111111f;
out_ptr++;
line0 += 2;
line1 += 2;
}
if (inw % 2 == 1)
{
if (is_caffe == 0)
*out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
else
*out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.11111111f;
out_ptr++;
}
else if (inw % 2 == 0 && is_caffe == 1)
{
*out_ptr = (line0[0] + line1[0]) * 0.16666667f;
out_ptr++;
}
}
else if (inh % 2 == 0 && is_caffe == 1) /* was: inw % 2 == 0 (bug) */
{
/* Caffe, even height: final output row sees a single valid input row.  */
*out_ptr = (line0[0] + line0[1]) * 0.16666667f;
out_ptr++;
line0 += 1;
float32x4x2_t p00 = vld2q_f32(line0);
for (int j = 0; j < block_w; j++)
{
float32x4x2_t p00_new = vld2q_f32(line0 + 8);
float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
sum0 = vaddq_f32(sum0, p01);
sum0 = vmulq_n_f32(sum0, 0.16666667f);
vst1q_f32(out_ptr, sum0);
p00 = p00_new;
line0 += 8;
out_ptr += 4;
}
for (int j = block_w * 4 + 1; j < outw; j++)
{
*out_ptr = (line0[0] + line0[1] + line0[2]) * 0.16666667f;
out_ptr++;
line0 += 2;
}
if (inw % 2 == 1)
{
*out_ptr = (line0[0] + line0[1]) * 0.16666667f;
out_ptr++;
}
else if (inw % 2 == 0)
{
*out_ptr = line0[0] * 0.25f;
out_ptr++;
}
}
}
}
static void max_3x3s1_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
// TLOG_ERR("max_3x3s1_p1\n");
int in_hw = inw * inh;
int mid_w = inw - 2;
int mid_h = inh - 2;
for (int c = 0; c < inc; c++)
{
const float* line1 = input + c * in_hw;
const float* line2 = line1 + inw;
float* out_ptr = output + c * in_hw;
// h begin left----[line1+=0]-----------------------------------
*out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
out_ptr++;
// h begin center----[line1+=1]----------------------------------
for (int j = 0; j < mid_w; j++)
{
float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
float max2 = fmax(fmax(line2[0], line2[1]), line2[2]);
*out_ptr = fmax(max2, max1);
out_ptr++;
line1 += 1;
line2 += 1;
}
// h begin right----[line1+=2]-----------------------------------
*out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
out_ptr++;
line1 += 2;
line2 += 2;
// h center ---------------------------------------
const float* line0 = input + c * in_hw;
for (int i = 0; i < mid_h; i++)
{
// left
float max0 = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
*out_ptr = fmax(fmax(line0[0], line0[1]), max0);
out_ptr++;
// mid
for (int j = 0; j < mid_w; j++)
{
float max0 = fmax(fmax(line0[0], line0[1]), line0[2]);
float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
float max2 = fmax(fmax(line2[0], line2[1]), line2[2]);
*out_ptr = fmax(fmax(max0, max1), max2);
out_ptr++;
line0 += 1;
line1 += 1;
line2 += 1;
}
max0 = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
*out_ptr = fmax(fmax(line0[0], line0[1]), max0);
out_ptr++;
line0 += 2;
line1 += 2;
line2 += 2;
}
// h end ------------------------------------------
*out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line0[0], line0[1]));
out_ptr++;
for (int j = 0; j < mid_w; j++)
{
float max0 = fmax(fmax(line0[0], line0[1]), line0[2]);
float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
*out_ptr = fmax(max0, max1);
out_ptr++;
line0 += 1;
line1 += 1;
}
*out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line0[0], line0[1]));
}
}
/* 3x3 average pooling, stride 1, pad 1 on all four borders (scalar).
   Output has the same spatial size as the input.  Non-caffe flavor divides
   each window's sum by its number of valid elements (4 at corners, 6 at
   edges, 9 inside); caffe flavor (is_caffe != 0) always divides by the full
   kernel size 9.  k_h..pad_w1 are unused (kernel shape hard-coded; common
   pooling_kernel_t signature).  */
static void avg_3x3s1_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                         int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int mid_w = inw - 2;
    int mid_h = inh - 2;

    /* Reciprocal scale per window size; caffe always uses 1/9.  */
    const float k4 = is_caffe == 0 ? 0.25f : 0.11111111f;
    const float k6 = is_caffe == 0 ? 0.16666667f : 0.11111111f;
    const float k9 = 0.11111111f;

    for (int c = 0; c < inc; c++)
    {
        const float* r1 = input + c * in_hw;
        const float* r2 = r1 + inw;
        float* dst = output + c * in_hw;

        /* Top output row: only the first two input rows are valid.  */
        *dst++ = (r1[0] + r1[1] + r2[0] + r2[1]) * k4;
        for (int j = 0; j < mid_w; j++, r1++, r2++)
            *dst++ = (r1[0] + r1[1] + r1[2] + r2[0] + r2[1] + r2[2]) * k6;
        *dst++ = (r1[0] + r1[1] + r2[0] + r2[1]) * k4;
        r1 += 2;
        r2 += 2;

        /* Middle output rows: full three-row windows.  */
        const float* r0 = input + c * in_hw;
        for (int i = 0; i < mid_h; i++)
        {
            *dst++ = (r0[0] + r0[1] + r1[0] + r1[1] + r2[0] + r2[1]) * k6;
            for (int j = 0; j < mid_w; j++, r0++, r1++, r2++)
                *dst++ = (r0[0] + r0[1] + r0[2] + r1[0] + r1[1] + r1[2] + r2[0] + r2[1] + r2[2]) * k9;
            *dst++ = (r0[0] + r0[1] + r1[0] + r1[1] + r2[0] + r2[1]) * k6;
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }

        /* Bottom output row: only the last two input rows (r0, r1) are valid.  */
        *dst++ = (r0[0] + r0[1] + r1[0] + r1[1]) * k4;
        for (int j = 0; j < mid_w; j++, r0++, r1++)
            *dst++ = (r0[0] + r0[1] + r0[2] + r1[0] + r1[1] + r1[2]) * k6;
        *dst = (r0[0] + r0[1] + r1[0] + r1[1]) * k4;
    }
}
/* Global average pooling: one output value per channel, the mean of all
   inh*inw elements.  Vector loop sums 8 floats per iteration; scalar loop
   covers the remaining in_hw % 8 elements.  outh/outw, k_*, s_*, pad_*,
   is_caffe are unused (common pooling_kernel_t signature).  */
static void avg_global(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
int in_hw = inw * inh;
int block = in_hw >> 3; /* number of 8-float vector iterations */
int tail = in_hw & ~7; /* first element handled by the scalar loop */
for (int c = 0; c < inc; c++)
{
const float* line0 = input + c * in_hw;
float* out_ptr = output + c;
float sum = 0.f;
for (int j = 0; j < block; j++)
{
float32x4_t p00 = vld1q_f32(line0);
float32x4_t p01 = vld1q_f32(line0 + 4);
p00 = vaddq_f32(p00, p01);
// p00=vpaddq_f32(p00,p00);
// sum+=(p00[0]+p00[1]);
/* Lane access p00[i] is a GCC/Clang vector extension.  */
sum += (p00[0] + p00[1] + p00[2] + p00[3]);
line0 += 8;
}
for (int j = tail; j < in_hw; j++)
{
sum += line0[0];
line0++;
}
/* in_hw is promoted to float for the division.  */
*out_ptr = sum / in_hw;
}
}
/* Global max pooling: one output value per channel, the max of all inh*inw
   elements.  Vector loop reduces 8 floats per iteration; scalar loop covers
   the remaining in_hw % 8 elements.  outh/outw, k_*, s_*, pad_*, is_caffe
   are unused (common pooling_kernel_t signature).
   FIX: the seed vector used to be loaded unconditionally with
   vld1q_f32(line0), reading 4 floats even when in_hw < 4 — an out-of-bounds
   read whose garbage lanes could win the max.  The vector path is now only
   entered when at least one full 8-float block exists (so the 4-float seed
   load is in bounds), and the scalar path is seeded from line0[0].  */
static void max_global(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
int in_hw = inw * inh;
int block = in_hw >> 3; /* number of 8-float vector iterations */
int tail = in_hw & ~7; /* first element handled by the scalar loop */
for (int c = 0; c < inc; c++)
{
const float* line0 = input + c * in_hw;
float* out_ptr = output + c;
float max_ = line0[0];
if (block > 0)
{
/* Safe: in_hw >= 8 here, so the 4-float seed load is in bounds.
   (Elements 0..3 are also folded in again by the first loop pass,
   which is harmless for a max reduction.)  */
float32x4_t res = vld1q_f32(line0);
for (int j = 0; j < block; j++)
{
float32x4_t p00 = vld1q_f32(line0);
float32x4_t p01 = vld1q_f32(line0 + 4);
res = vmaxq_f32(res, vmaxq_f32(p00, p01));
line0 += 8;
}
/* Lane access res[i] is a GCC/Clang vector extension.  */
max_ = fmax(fmax(res[0], res[1]), fmax(res[2], res[3]));
}
for (int j = tail; j < in_hw; j++)
{
max_ = fmax(max_, line0[0]);
line0++;
}
*out_ptr = max_;
}
}
/* Select the specialized pooling kernel for this layer's configuration and
   store it in param->funct.  Returns 0 when a kernel was found, -1 (with a
   log message) otherwise.
   NOTE(review): the `param->funct != NULL` checks assume funct was zeroed
   before this call — confirm the param struct is zero-initialized by the
   caller.  */
int pooling_kernel_perf_prerun(struct tensor* input, struct tensor* out, struct pool_param* param)
{
int pool_size = POOL_GENERIC;
/* global pooling */
if (param->global)
{
if (param->pool_method == POOL_AVG)
param->funct = (pooling_kernel_t)avg_global;
else if (param->pool_method == POOL_MAX)
param->funct = (pooling_kernel_t)max_global;
assert(param->funct != NULL);
return 0;
}
/* general pooling */
/* Classify the kernel/stride shape; anything else stays POOL_GENERIC and
   falls through to the error paths below.  */
if (param->stride_h == 2 && param->stride_w == 2)
{
if (param->kernel_h == 2 && param->kernel_w == 2)
pool_size = POOL_K2S2;
else if (param->kernel_h == 3 && param->kernel_w == 3)
pool_size = POOL_K3S2;
}
else if (param->stride_h == 1 && param->stride_w == 1)
{
if (param->kernel_h == 3 && param->kernel_w == 3)
pool_size = POOL_K3S1;
}
/* general max pooling, k2s2, k2k2p1, k3s1p1, k3s2, k3s2p1 */
/* Specialized kernels only exist for symmetric padding (h == w).  */
if (param->pool_method == POOL_MAX)
{
if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1))
{
if (param->pad_h0 == 0)
{
if (pool_size == POOL_K2S2)
param->funct = (pooling_kernel_t)max_2x2s2;
else if (pool_size == POOL_K3S2)
param->funct = (pooling_kernel_t)max_3x3s2;
}
else if (param->pad_h0 == 1)
{
if (pool_size == POOL_K2S2)
param->funct = (pooling_kernel_t)max_2x2s2_p1;
else if (pool_size == POOL_K3S2)
param->funct = (pooling_kernel_t)max_3x3s2_p1;
else if (pool_size == POOL_K3S1)
param->funct = (pooling_kernel_t)max_3x3s1_p1;
}
}
if (param->funct != NULL)
return 0;
else
{
TLOG_ERR("perf general max pooling func not be find\n");
return -1;
}
}
/* general avg pooling, k2s2, k2s2p1, k3s2, k3s2p1 */
if (param->pool_method == POOL_AVG)
{
if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1))
{
if (param->pad_h0 == 0 && param->pad_h1 == 0)
{
if (pool_size == POOL_K2S2)
param->funct = (pooling_kernel_t)avg_2x2s2;
else if (pool_size == POOL_K3S2)
param->funct = (pooling_kernel_t)avg_3x3s2;
}
else if (param->pad_h0 == 1 && param->pad_h1 == 1)
{
if (pool_size == POOL_K2S2)
param->funct = (pooling_kernel_t)avg_2x2s2_p1;
else if (pool_size == POOL_K3S2)
param->funct = (pooling_kernel_t)avg_3x3s2_p1;
else if (pool_size == POOL_K3S1)
param->funct = (pooling_kernel_t)avg_3x3s1_p1;
}
else if (param->pad_h0 == 0 && param->pad_h1 == 1)
{
/* Asymmetric trailing pad: the unpadded k3s2 kernel covers it.  */
if (pool_size == POOL_K3S2)
param->funct = (pooling_kernel_t)avg_3x3s2;
}
}
if (param->funct != NULL)
return 0;
else
{
TLOG_ERR("perf general avg pooling func not be find\n");
return -1;
}
}
TLOG_ERR("perf pooling func not be find\n");
return -1;
}
/* Run the pooling kernel selected by pooling_kernel_perf_prerun over every
   batch item, parallelized over channels with OpenMP.  The kernel is invoked
   per channel (inc == 1) so each OpenMP task works on one plane.
   Always returns 0.
   NOTE: arithmetic on void* (input->data + ...) is a GNU C extension.  */
int pooling_kernel_perf_run(struct tensor* input, struct tensor* output, struct pool_param* param, int num_thread)
{
// TLOG_ERR("perf pooling_kernel_run\n");
int is_caffe = param->caffe_flavor;
pooling_kernel_t kernel = (pooling_kernel_t)(param->funct);
/* Tensor layout is NCHW.  */
int batch = input->dims[0];
int c = input->dims[1];
int in_h = input->dims[2];
int in_w = input->dims[3];
int out_h = output->dims[2];
int out_w = output->dims[3];
int img_size = c * in_h * in_w;
int feature_size = c * out_h * out_w;
for (int n = 0; n < batch; n++)
{
void* input_frame = input->data + n * img_size * input->elem_size;
void* output_frame = output->data + n * feature_size * output->elem_size;
#pragma omp parallel for num_threads(num_thread)
for (int ch = 0; ch < c; ch++)
{
void* cur_input = input_frame + ch * in_h * in_w * input->elem_size;
void* cur_output = output_frame + ch * out_h * out_w * output->elem_size;
kernel(cur_input, cur_output, 1, in_h, in_w, out_h, out_w, param->kernel_h, param->kernel_w,
param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->pad_h1, param->pad_w1,
is_caffe);
}
}
return 0;
}
|
gimple-pretty-print.c | /* Pretty formatting of GIMPLE statements and expressions.
Copyright (C) 2001-2018 Free Software Foundation, Inc.
Contributed by Aldy Hernandez <aldyh@redhat.com> and
Diego Novillo <dnovillo@google.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "gimple-predict.h"
#include "ssa.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "dumpfile.h" /* for dump_flags */
#include "value-prof.h"
#include "trans-mem.h"
#include "cfganal.h"
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"
#define INDENT(SPACE) \
do { int i; for (i = 0; i < SPACE; i++) pp_space (buffer); } while (0)
#define GIMPLE_NIY do_niy (buffer,gs)
/* Print on BUFFER a default "unknown statement" message for GS.  Used as
   the fallback (via the GIMPLE_NIY macro) for GIMPLE codes the
   pretty-printer does not yet handle.  */
static void
do_niy (pretty_printer *buffer, gimple *gs)
{
pp_printf (buffer, "<<< Unknown GIMPLE statement: %s >>>\n",
gimple_code_name[(int) gimple_code (gs)]);
}
/* Emit a newline followed by SPC indentation spaces to BUFFER.  */
static void
newline_and_indent (pretty_printer *buffer, int spc)
{
pp_newline (buffer);
INDENT (spc);
}
/* Print the GIMPLE statement GS on stderr with virtual operands and memory
   symbols included.  Intended for use from a debugger.  */
DEBUG_FUNCTION void
debug_gimple_stmt (gimple *gs)
{
print_gimple_stmt (stderr, gs, 0, TDF_VOPS|TDF_MEMSYMS);
}
/* Return a formatted string for profile COUNT, e.g. "[count: N]" for an
   IPA count or "[local count: N]" otherwise, or "" when COUNT is not
   initialized.  The returned string is allocated by xstrdup_for_dump.  */
static const char *
dump_profile (profile_count &count)
{
  if (!count.initialized_p ())
    return "";

  /* After the early return COUNT is known to be initialized, so the only
     remaining distinction is IPA vs. local.  (The second initialized_p ()
     test here was redundant — and had it ever been false, a NULL buf would
     have been handed to xstrdup_for_dump.)  */
  char *buf;
  if (count.ipa_p ())
    buf = xasprintf ("[count: %" PRId64 "]",
		     count.to_gcov_type ());
  else
    buf = xasprintf ("[local count: %" PRId64 "]",
		     count.to_gcov_type ());

  const char *ret = xstrdup_for_dump (buf);
  free (buf);
  return ret;
}
/* Return a formatted string for PROBABILITY, e.g. "[42.00%]", or "[INV]"
   when it is not initialized.  A nonzero probability is never shown below
   0.01% so it does not print as zero.  The returned string is allocated by
   xstrdup_for_dump.  */
static const char *
dump_probability (profile_probability probability)
{
  const float minimum = 0.01f;
  char *buf;
  if (!probability.initialized_p ())
    buf = xasprintf ("[INV]");
  else
    {
      float fvalue
	= probability.to_reg_br_prob_base () * 100.0f / REG_BR_PROB_BASE;
      /* Clamp nonzero values up to the display minimum.  */
      if (fvalue < minimum && probability.to_reg_br_prob_base ())
	fvalue = minimum;
      buf = xasprintf ("[%.2f%%]", fvalue);
    }
  const char *ret = xstrdup_for_dump (buf);
  free (buf);
  return ret;
}
/* Dump the probability of edge E to BUFFER, preceded by a space.
   NOTE(review): pp_scalar is used with a "%s" format here — it works since
   the formatted value is a string, but pp_printf/pp_string would be the
   more conventional choice; confirm against the pretty-print API.  */
static void
dump_edge_probability (pretty_printer *buffer, edge e)
{
pp_scalar (buffer, " %s", dump_probability (e->probability));
}
/* Print GIMPLE statement G to FILE using SPC indentation spaces and FLAGS
   as in pp_gimple_stmt_1, followed by a newline; the buffer is flushed
   before returning.  */
void
print_gimple_stmt (FILE *file, gimple *g, int spc, dump_flags_t flags)
{
pretty_printer buffer;
pp_needs_newline (&buffer) = true;
buffer.buffer->stream = file;
pp_gimple_stmt_1 (&buffer, g, spc, flags);
pp_newline_and_flush (&buffer);
}
/* Debugger helper: print the statement REF to stderr with no indentation
   and default flags.  */
DEBUG_FUNCTION void
debug (gimple &ref)
{
print_gimple_stmt (stderr, &ref, 0, 0);
}
/* Debugger helper: print the statement PTR points to, or "<nil>" when PTR
   is null.  */
DEBUG_FUNCTION void
debug (gimple *ptr)
{
  if (!ptr)
    {
      fprintf (stderr, "<nil>\n");
      return;
    }
  debug (*ptr);
}
/* Print GIMPLE statement G to FILE using SPC indentation spaces and FLAGS
   as in pp_gimple_stmt_1, restricted to the right-hand side of the
   statement (TDF_RHS_ONLY is OR-ed into FLAGS).  No trailing newline is
   emitted; the buffer is flushed.  */
void
print_gimple_expr (FILE *file, gimple *g, int spc, dump_flags_t flags)
{
flags |= TDF_RHS_ONLY;
pretty_printer buffer;
pp_needs_newline (&buffer) = true;
buffer.buffer->stream = file;
pp_gimple_stmt_1 (&buffer, g, spc, flags);
pp_flush (&buffer);
}
/* Print the GIMPLE sequence SEQ on BUFFER using SPC indentation spaces and
   FLAGS as in pp_gimple_stmt_1.  A newline separates consecutive
   statements but none follows the last one.  The caller is responsible for
   calling pp_flush on BUFFER to finalize the pretty printer.  */
static void
dump_gimple_seq (pretty_printer *buffer, gimple_seq seq, int spc,
dump_flags_t flags)
{
gimple_stmt_iterator i;
for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
{
gimple *gs = gsi_stmt (i);
INDENT (spc);
pp_gimple_stmt_1 (buffer, gs, spc, flags);
if (!gsi_one_before_end_p (i))
pp_newline (buffer);
}
}
/* Print GIMPLE sequence SEQ to FILE, indented by SPC spaces, using
   FLAGS as in pp_gimple_stmt_1.  */

void
print_gimple_seq (FILE *file, gimple_seq seq, int spc, dump_flags_t flags)
{
  pretty_printer pp;
  pp_needs_newline (&pp) = true;
  pp.buffer->stream = file;
  dump_gimple_seq (&pp, seq, spc, flags);
  pp_newline_and_flush (&pp);
}
/* Print the GIMPLE sequence SEQ on stderr, including virtual operands
   and memory symbols; intended to be called from the debugger.  */

DEBUG_FUNCTION void
debug_gimple_seq (gimple_seq seq)
{
  dump_flags_t flags = TDF_VOPS | TDF_MEMSYMS;
  print_gimple_seq (stderr, seq, 0, flags);
}
/* A simple helper to pretty-print some of the gimple tuples in the printf
   style.  The variadic arguments must match the modifiers in FMT, in
   order, since they are consumed with va_arg.  The format modifiers are
   preceded by '%' and are:
     'G' - outputs a string corresponding to the code of the given gimple,
     'S' - outputs a gimple_seq with indent of spc + 2,
     'T' - outputs the tree t,
     'd' - outputs an int as a decimal,
     's' - outputs a string,
     'n' - outputs a newline,
     'x' - outputs an int as hexadecimal,
     '+' - increases indent by 2 then outputs a newline,
     '-' - decreases indent by 2 then outputs a newline.   */

static void
dump_gimple_fmt (pretty_printer *buffer, int spc, dump_flags_t flags,
                 const char *fmt, ...)
{
  va_list args;
  const char *c;
  const char *tmp;

  va_start (args, fmt);
  for (c = fmt; *c; c++)
    {
      if (*c == '%')
        {
          gimple_seq seq;
          tree t;
          gimple *g;

          /* Dispatch on the character following '%'; each case consumes
             exactly the argument type documented above.  */
          switch (*++c)
            {
            case 'G':
              g = va_arg (args, gimple *);
              tmp = gimple_code_name[gimple_code (g)];
              pp_string (buffer, tmp);
              break;

            case 'S':
              seq = va_arg (args, gimple_seq);
              pp_newline (buffer);
              dump_gimple_seq (buffer, seq, spc + 2, flags);
              newline_and_indent (buffer, spc);
              break;

            case 'T':
              t = va_arg (args, tree);
              if (t == NULL_TREE)
                pp_string (buffer, "NULL");
              else
                dump_generic_node (buffer, t, spc, flags, false);
              break;

            case 'd':
              pp_decimal_int (buffer, va_arg (args, int));
              break;

            case 's':
              pp_string (buffer, va_arg (args, char *));
              break;

            case 'n':
              newline_and_indent (buffer, spc);
              break;

            case 'x':
              pp_scalar (buffer, "%x", va_arg (args, int));
              break;

            case '+':
              spc += 2;
              newline_and_indent (buffer, spc);
              break;

            case '-':
              spc -= 2;
              newline_and_indent (buffer, spc);
              break;

            default:
              gcc_unreachable ();
            }
        }
      else
        /* Ordinary characters are copied through verbatim.  */
        pp_character (buffer, *c);
    }
  va_end (args);
}
/* Helper for dump_gimple_assign.  Print the unary RHS of the
   assignment GS.  BUFFER, SPC and FLAGS are as in pp_gimple_stmt_1.  */

static void
dump_unary_rhs (pretty_printer *buffer, gassign *gs, int spc,
                dump_flags_t flags)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (gs);
  tree lhs = gimple_assign_lhs (gs);
  tree rhs = gimple_assign_rhs1 (gs);

  switch (rhs_code)
    {
    case VIEW_CONVERT_EXPR:
    case ASSERT_EXPR:
      /* These nodes carry their own printed form.  */
      dump_generic_node (buffer, rhs, spc, flags, false);
      break;

    case FIXED_CONVERT_EXPR:
    case ADDR_SPACE_CONVERT_EXPR:
    case FIX_TRUNC_EXPR:
    case FLOAT_EXPR:
    CASE_CONVERT:
      /* Conversions print as a C-style cast to the LHS type,
         parenthesizing the operand when its printed precedence is
         lower than the conversion's.  */
      pp_left_paren (buffer);
      dump_generic_node (buffer, TREE_TYPE (lhs), spc, flags, false);
      pp_string (buffer, ") ");
      if (op_prio (rhs) < op_code_prio (rhs_code))
        {
          pp_left_paren (buffer);
          dump_generic_node (buffer, rhs, spc, flags, false);
          pp_right_paren (buffer);
        }
      else
        dump_generic_node (buffer, rhs, spc, flags, false);
      break;

    case PAREN_EXPR:
      pp_string (buffer, "((");
      dump_generic_node (buffer, rhs, spc, flags, false);
      pp_string (buffer, "))");
      break;

    case ABS_EXPR:
      /* The GIMPLE FE spells this __ABS; plain dumps use the tree-code
         style ABS_EXPR <...>.  */
      if (flags & TDF_GIMPLE)
        {
          pp_string (buffer, "__ABS ");
          dump_generic_node (buffer, rhs, spc, flags, false);
        }
      else
        {
          pp_string (buffer, "ABS_EXPR <");
          dump_generic_node (buffer, rhs, spc, flags, false);
          pp_greater (buffer);
        }
      break;

    default:
      if (TREE_CODE_CLASS (rhs_code) == tcc_declaration
          || TREE_CODE_CLASS (rhs_code) == tcc_constant
          || TREE_CODE_CLASS (rhs_code) == tcc_reference
          || rhs_code == SSA_NAME
          || rhs_code == ADDR_EXPR
          || rhs_code == CONSTRUCTOR)
        {
          /* A plain copy: print the operand by itself.  */
          dump_generic_node (buffer, rhs, spc, flags, false);
          break;
        }
      else if (rhs_code == BIT_NOT_EXPR)
        pp_complement (buffer);
      else if (rhs_code == TRUTH_NOT_EXPR)
        pp_exclamation (buffer);
      else if (rhs_code == NEGATE_EXPR)
        pp_minus (buffer);
      else
        {
          /* No dedicated unary symbol: fall back to "[<code name>] op".  */
          pp_left_bracket (buffer);
          pp_string (buffer, get_tree_code_name (rhs_code));
          pp_string (buffer, "] ");
        }

      if (op_prio (rhs) < op_code_prio (rhs_code))
        {
          pp_left_paren (buffer);
          dump_generic_node (buffer, rhs, spc, flags, false);
          pp_right_paren (buffer);
        }
      else
        dump_generic_node (buffer, rhs, spc, flags, false);
      break;
    }
}
/* Helper for dump_gimple_assign.  Print the binary RHS of the
   assignment GS.  BUFFER, SPC and FLAGS are as in pp_gimple_stmt_1.  */

static void
dump_binary_rhs (pretty_printer *buffer, gassign *gs, int spc,
                 dump_flags_t flags)
{
  const char *p;
  enum tree_code code = gimple_assign_rhs_code (gs);
  switch (code)
    {
    /* These codes have no infix operator; print them in the
       "CODE_NAME <op1, op2>" functional form.  */
    case COMPLEX_EXPR:
    case MIN_EXPR:
    case MAX_EXPR:
    case VEC_WIDEN_MULT_HI_EXPR:
    case VEC_WIDEN_MULT_LO_EXPR:
    case VEC_WIDEN_MULT_EVEN_EXPR:
    case VEC_WIDEN_MULT_ODD_EXPR:
    case VEC_PACK_TRUNC_EXPR:
    case VEC_PACK_SAT_EXPR:
    case VEC_PACK_FIX_TRUNC_EXPR:
    case VEC_WIDEN_LSHIFT_HI_EXPR:
    case VEC_WIDEN_LSHIFT_LO_EXPR:
    case VEC_SERIES_EXPR:
      /* The tree-code name is stored lowercase; upcase it for output.  */
      for (p = get_tree_code_name (code); *p; p++)
        pp_character (buffer, TOUPPER (*p));
      pp_string (buffer, " <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_greater (buffer);
      break;

    default:
      /* Infix form: "op1 <symbol> op2", parenthesizing operands whose
         printed precedence does not exceed the operator's.  */
      if (op_prio (gimple_assign_rhs1 (gs)) <= op_code_prio (code))
        {
          pp_left_paren (buffer);
          dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags,
                             false);
          pp_right_paren (buffer);
        }
      else
        dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_space (buffer);
      pp_string (buffer, op_symbol_code (gimple_assign_rhs_code (gs)));
      pp_space (buffer);
      if (op_prio (gimple_assign_rhs2 (gs)) <= op_code_prio (code))
        {
          pp_left_paren (buffer);
          dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags,
                             false);
          pp_right_paren (buffer);
        }
      else
        dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
    }
}
/* Helper for dump_gimple_assign.  Print the ternary RHS of the
   assignment GS.  BUFFER, SPC and FLAGS are as in pp_gimple_stmt_1.  */

static void
dump_ternary_rhs (pretty_printer *buffer, gassign *gs, int spc,
                  dump_flags_t flags)
{
  const char *p;
  enum tree_code code = gimple_assign_rhs_code (gs);
  switch (code)
    {
    case WIDEN_MULT_PLUS_EXPR:
    case WIDEN_MULT_MINUS_EXPR:
      /* "CODE_NAME <op1, op2, op3>" with the code name upcased.  */
      for (p = get_tree_code_name (code); *p; p++)
        pp_character (buffer, TOUPPER (*p));
      pp_string (buffer, " <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      pp_greater (buffer);
      break;

    case FMA_EXPR:
      /* The GIMPLE FE uses the __FMA (a, b, c) call form; plain dumps
         print the equivalent a * b + c expression.  */
      if (flags & TDF_GIMPLE)
        {
          pp_string (buffer, "__FMA (");
          dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
          pp_comma (buffer);
          dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
          pp_comma (buffer);
          dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
          pp_right_paren (buffer);
        }
      else
        {
          dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
          pp_string (buffer, " * ");
          dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
          pp_string (buffer, " + ");
          dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
        }
      break;

    case DOT_PROD_EXPR:
      pp_string (buffer, "DOT_PROD_EXPR <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      pp_greater (buffer);
      break;

    case SAD_EXPR:
      pp_string (buffer, "SAD_EXPR <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      pp_greater (buffer);
      break;

    case VEC_PERM_EXPR:
      pp_string (buffer, "VEC_PERM_EXPR <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      pp_greater (buffer);
      break;

    case REALIGN_LOAD_EXPR:
      pp_string (buffer, "REALIGN_LOAD <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      pp_greater (buffer);
      break;

    case COND_EXPR:
      /* C-style conditional expression: cond ? then : else.  */
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, " ? ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, " : ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      break;

    case VEC_COND_EXPR:
      pp_string (buffer, "VEC_COND_EXPR <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      pp_greater (buffer);
      break;

    case BIT_INSERT_EXPR:
      /* Also print the width of the inserted value: the precision for
         integral types, otherwise the type size.  */
      pp_string (buffer, "BIT_INSERT_EXPR <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      pp_string (buffer, " (");
      if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs2 (gs))))
        pp_decimal_int (buffer,
                        TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs2 (gs))));
      else
        dump_generic_node (buffer,
                           TYPE_SIZE (TREE_TYPE (gimple_assign_rhs2 (gs))),
                           spc, flags, false);
      pp_string (buffer, " bits)>");
      break;

    default:
      gcc_unreachable ();
    }
}
/* Dump the gimple assignment GS.  BUFFER, SPC and FLAGS are as in
   pp_gimple_stmt_1.  */

static void
dump_gimple_assign (pretty_printer *buffer, gassign *gs, int spc,
                    dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      tree arg1 = NULL;
      tree arg2 = NULL;
      tree arg3 = NULL;

      /* Pick up as many RHS operands as the tuple holds; the cases
         deliberately fall through from most operands to fewest.  */
      switch (gimple_num_ops (gs))
        {
        case 4:
          arg3 = gimple_assign_rhs3 (gs);
          /* FALLTHRU */
        case 3:
          arg2 = gimple_assign_rhs2 (gs);
          /* FALLTHRU */
        case 2:
          arg1 = gimple_assign_rhs1 (gs);
          break;
        default:
          gcc_unreachable ();
        }

      dump_gimple_fmt (buffer, spc, flags, "%G <%s, %T, %T, %T, %T>", gs,
                       get_tree_code_name (gimple_assign_rhs_code (gs)),
                       gimple_assign_lhs (gs), arg1, arg2, arg3);
    }
  else
    {
      if (!(flags & TDF_RHS_ONLY))
        {
          dump_generic_node (buffer, gimple_assign_lhs (gs), spc, flags, false);
          pp_space (buffer);
          pp_equal (buffer);

          /* Annotate non-temporal moves and volatile accesses.  */
          if (gimple_assign_nontemporal_move_p (gs))
            pp_string (buffer, "{nt}");

          if (gimple_has_volatile_ops (gs))
            pp_string (buffer, "{v}");

          pp_space (buffer);
        }

      /* The operand count (including the LHS) selects the RHS arity.  */
      if (gimple_num_ops (gs) == 2)
        dump_unary_rhs (buffer, gs, spc, flags);
      else if (gimple_num_ops (gs) == 3)
        dump_binary_rhs (buffer, gs, spc, flags);
      else if (gimple_num_ops (gs) == 4)
        dump_ternary_rhs (buffer, gs, spc, flags);
      else
        gcc_unreachable ();
      if (!(flags & TDF_RHS_ONLY))
        pp_semicolon (buffer);
    }
}
/* Dump the return statement GS.  BUFFER, SPC and FLAGS are as in
   pp_gimple_stmt_1.  */

static void
dump_gimple_return (pretty_printer *buffer, greturn *gs, int spc,
		    dump_flags_t flags)
{
  tree retval = gimple_return_retval (gs);
  tree retbnd = gimple_return_retbnd (gs);

  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T %T>", gs, retval, retbnd);
      return;
    }

  pp_string (buffer, "return");
  if (retval)
    {
      pp_space (buffer);
      dump_generic_node (buffer, retval, spc, flags, false);
    }
  if (retbnd)
    {
      pp_string (buffer, ", ");
      dump_generic_node (buffer, retbnd, spc, flags, false);
    }
  pp_semicolon (buffer);
}
/* Dump the call arguments for a gimple call.  BUFFER, FLAGS are as in
   dump_gimple_call.  */

static void
dump_gimple_call_args (pretty_printer *buffer, gcall *gs, dump_flags_t flags)
{
  size_t i = 0;

  /* Pretty print first arg to certain internal fns: when the first
     argument is a small constant selecting one of the fn's operation
     codes, print the code's name instead of the raw number.  The DEF
     macro stringizes each entry of the *_CODES x-macro lists.  */
  if (gimple_call_internal_p (gs))
    {
      const char *const *enums = NULL;
      unsigned limit = 0;

      switch (gimple_call_internal_fn (gs))
	{
	case IFN_UNIQUE:
#define DEF(X) #X
	  static const char *const unique_args[] = {IFN_UNIQUE_CODES};
#undef DEF
	  enums = unique_args;
	  limit = ARRAY_SIZE (unique_args);
	  break;

	case IFN_GOACC_LOOP:
#define DEF(X) #X
	  static const char *const loop_args[] = {IFN_GOACC_LOOP_CODES};
#undef DEF
	  enums = loop_args;
	  limit = ARRAY_SIZE (loop_args);
	  break;

	case IFN_GOACC_REDUCTION:
#define DEF(X) #X
	  static const char *const reduction_args[]
	    = {IFN_GOACC_REDUCTION_CODES};
#undef DEF
	  enums = reduction_args;
	  limit = ARRAY_SIZE (reduction_args);
	  break;

	case IFN_ASAN_MARK:
#define DEF(X) #X
	  static const char *const asan_mark_args[] = {IFN_ASAN_MARK_FLAGS};
#undef DEF
	  enums = asan_mark_args;
	  limit = ARRAY_SIZE (asan_mark_args);
	  break;

	default:
	  break;
	}
      if (limit)
	{
	  tree arg0 = gimple_call_arg (gs, 0);
	  HOST_WIDE_INT v;

	  /* Only substitute the name when the constant is in range;
	     otherwise fall through and print it like any argument.  */
	  if (TREE_CODE (arg0) == INTEGER_CST
	      && tree_fits_shwi_p (arg0)
	      && (v = tree_to_shwi (arg0)) >= 0 && v < limit)
	    {
	      i++;
	      pp_string (buffer, enums[v]);
	    }
	}
    }

  /* Print the remaining arguments, comma-separated.  */
  for (; i < gimple_call_num_args (gs); i++)
    {
      if (i)
	pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_call_arg (gs, i), 0, flags, false);
    }

  if (gimple_call_va_arg_pack_p (gs))
    {
      if (i)
	pp_string (buffer, ", ");

      pp_string (buffer, "__builtin_va_arg_pack ()");
    }
}
/* Dump the points-to solution *PT to BUFFER.  */

static void
pp_points_to_solution (pretty_printer *buffer, struct pt_solution *pt)
{
  /* "anything" subsumes every other property, so nothing else needs
     to be printed.  */
  if (pt->anything)
    {
      pp_string (buffer, "anything ");
      return;
    }
  if (pt->nonlocal)
    pp_string (buffer, "nonlocal ");
  if (pt->escaped)
    pp_string (buffer, "escaped ");
  if (pt->ipa_escaped)
    pp_string (buffer, "unit-escaped ");
  if (pt->null)
    pp_string (buffer, "null ");
  if (pt->vars
      && !bitmap_empty_p (pt->vars))
    {
      bitmap_iterator bi;
      unsigned i;

      /* Print the set of pointed-to variables by their DECL_UIDs.  */
      pp_string (buffer, "{ ");
      EXECUTE_IF_SET_IN_BITMAP (pt->vars, 0, i, bi)
	{
	  pp_string (buffer, "D.");
	  pp_decimal_int (buffer, i);
	  pp_space (buffer);
	}
      pp_right_brace (buffer);

      /* Summarize properties of the members of the vars set.  */
      if (pt->vars_contains_nonlocal
	  || pt->vars_contains_escaped
	  || pt->vars_contains_escaped_heap
	  || pt->vars_contains_restrict)
	{
	  const char *comma = "";
	  pp_string (buffer, " (");
	  if (pt->vars_contains_nonlocal)
	    {
	      pp_string (buffer, "nonlocal");
	      comma = ", ";
	    }
	  if (pt->vars_contains_escaped)
	    {
	      pp_string (buffer, comma);
	      pp_string (buffer, "escaped");
	      comma = ", ";
	    }
	  if (pt->vars_contains_escaped_heap)
	    {
	      pp_string (buffer, comma);
	      pp_string (buffer, "escaped heap");
	      comma = ", ";
	    }
	  if (pt->vars_contains_restrict)
	    {
	      pp_string (buffer, comma);
	      pp_string (buffer, "restrict");
	      comma = ", ";
	    }
	  if (pt->vars_contains_interposable)
	    {
	      pp_string (buffer, comma);
	      pp_string (buffer, "interposable");
	    }
	  pp_string (buffer, ")");
	}
    }
}
/* Dump the call statement GS.  BUFFER, SPC and FLAGS are as in
   pp_gimple_stmt_1.  */

static void
dump_gimple_call (pretty_printer *buffer, gcall *gs, int spc,
		  dump_flags_t flags)
{
  tree lhs = gimple_call_lhs (gs);
  tree fn = gimple_call_fn (gs);

  /* With TDF_ALIAS, print the call's use and clobber points-to sets
     on their own lines before the call itself.  */
  if (flags & TDF_ALIAS)
    {
      struct pt_solution *pt;

      pt = gimple_call_use_set (gs);
      if (!pt_solution_empty_p (pt))
	{
	  pp_string (buffer, "# USE = ");
	  pp_points_to_solution (buffer, pt);
	  newline_and_indent (buffer, spc);
	}
      pt = gimple_call_clobber_set (gs);
      if (!pt_solution_empty_p (pt))
	{
	  pp_string (buffer, "# CLB = ");
	  pp_points_to_solution (buffer, pt);
	  newline_and_indent (buffer, spc);
	}
    }

  if (flags & TDF_RAW)
    {
      /* Internal functions have no fndecl; print their name instead.  */
      if (gimple_call_internal_p (gs))
	dump_gimple_fmt (buffer, spc, flags, "%G <%s, %T", gs,
			 internal_fn_name (gimple_call_internal_fn (gs)), lhs);
      else
	dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T", gs, fn, lhs);
      if (gimple_call_num_args (gs) > 0)
	{
	  pp_string (buffer, ", ");
	  dump_gimple_call_args (buffer, gs, flags);
	}
      pp_greater (buffer);
    }
  else
    {
      if (lhs && !(flags & TDF_RHS_ONLY))
	{
	  dump_generic_node (buffer, lhs, spc, flags, false);
	  pp_string (buffer, " =");

	  if (gimple_has_volatile_ops (gs))
	    pp_string (buffer, "{v}");

	  pp_space (buffer);
	}
      if (gimple_call_internal_p (gs))
	pp_string (buffer, internal_fn_name (gimple_call_internal_fn (gs)));
      else
	print_call_name (buffer, fn, flags);
      pp_string (buffer, " (");
      dump_gimple_call_args (buffer, gs, flags);
      pp_right_paren (buffer);
      if (!(flags & TDF_RHS_ONLY))
	pp_semicolon (buffer);
    }

  /* Trailing annotations for special call properties.  */
  if (gimple_call_chain (gs))
    {
      pp_string (buffer, " [static-chain: ");
      dump_generic_node (buffer, gimple_call_chain (gs), spc, flags, false);
      pp_right_bracket (buffer);
    }

  if (gimple_call_return_slot_opt_p (gs))
    pp_string (buffer, " [return slot optimization]");
  if (gimple_call_tail_p (gs))
    pp_string (buffer, " [tail call]");
  if (gimple_call_must_tail_p (gs))
    pp_string (buffer, " [must tail call]");

  if (fn == NULL)
    return;

  /* Dump the arguments of _ITM_beginTransaction sanely.  */
  if (TREE_CODE (fn) == ADDR_EXPR)
    fn = TREE_OPERAND (fn, 0);
  if (TREE_CODE (fn) == FUNCTION_DECL && decl_is_tm_clone (fn))
    pp_string (buffer, " [tm-clone]");
  if (TREE_CODE (fn) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_START
      && gimple_call_num_args (gs) > 0)
    {
      tree t = gimple_call_arg (gs, 0);
      unsigned HOST_WIDE_INT props;
      gcc_assert (TREE_CODE (t) == INTEGER_CST);

      pp_string (buffer, " [ ");

      /* Get the transaction code properties.  */
      props = TREE_INT_CST_LOW (t);

      if (props & PR_INSTRUMENTEDCODE)
	pp_string (buffer, "instrumentedCode ");
      if (props & PR_UNINSTRUMENTEDCODE)
	pp_string (buffer, "uninstrumentedCode ");
      if (props & PR_HASNOXMMUPDATE)
	pp_string (buffer, "hasNoXMMUpdate ");
      if (props & PR_HASNOABORT)
	pp_string (buffer, "hasNoAbort ");
      if (props & PR_HASNOIRREVOCABLE)
	pp_string (buffer, "hasNoIrrevocable ");
      if (props & PR_DOESGOIRREVOCABLE)
	pp_string (buffer, "doesGoIrrevocable ");
      if (props & PR_HASNOSIMPLEREADS)
	pp_string (buffer, "hasNoSimpleReads ");
      if (props & PR_AWBARRIERSOMITTED)
	pp_string (buffer, "awBarriersOmitted ");
      if (props & PR_RARBARRIERSOMITTED)
	pp_string (buffer, "RaRBarriersOmitted ");
      if (props & PR_UNDOLOGCODE)
	pp_string (buffer, "undoLogCode ");
      if (props & PR_PREFERUNINSTRUMENTED)
	pp_string (buffer, "preferUninstrumented ");
      if (props & PR_EXCEPTIONBLOCK)
	pp_string (buffer, "exceptionBlock ");
      if (props & PR_HASELSE)
	pp_string (buffer, "hasElse ");
      if (props & PR_READONLY)
	pp_string (buffer, "readOnly ");

      pp_right_bracket (buffer);
    }
}
/* Dump the switch statement GS.  BUFFER, SPC and FLAGS are as in
   pp_gimple_stmt_1.  */

static void
dump_gimple_switch (pretty_printer *buffer, gswitch *gs, int spc,
		    dump_flags_t flags)
{
  unsigned int i;

  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%T, ", gs,
		     gimple_switch_index (gs));
  else
    {
      pp_string (buffer, "switch (");
      dump_generic_node (buffer, gimple_switch_index (gs), spc, flags, true);
      /* The GIMPLE FE uses braces; plain dumps use angle brackets.  */
      if (flags & TDF_GIMPLE)
	pp_string (buffer, ") {");
      else
	pp_string (buffer, ") <");
    }

  for (i = 0; i < gimple_switch_num_labels (gs); i++)
    {
      tree case_label = gimple_switch_label (gs, i);
      gcc_checking_assert (case_label != NULL_TREE);
      dump_generic_node (buffer, case_label, spc, flags, false);
      pp_space (buffer);
      tree label = CASE_LABEL (case_label);
      dump_generic_node (buffer, label, spc, flags, false);

      /* With a CFG available, annotate each case with the probability
	 of the corresponding outgoing edge.  */
      if (cfun && cfun->cfg)
	{
	  basic_block dest = label_to_block (label);
	  if (dest)
	    {
	      edge label_edge = find_edge (gimple_bb (gs), dest);
	      if (label_edge && !(flags & TDF_GIMPLE))
		dump_edge_probability (buffer, label_edge);
	    }
	}

      if (i < gimple_switch_num_labels (gs) - 1)
	{
	  if (flags & TDF_GIMPLE)
	    pp_string (buffer, "; ");
	  else
	    pp_string (buffer, ", ");
	}
    }
  if (flags & TDF_GIMPLE)
    pp_string (buffer, "; }");
  else
    pp_greater (buffer);
}
/* Dump the gimple conditional GS.  BUFFER, SPC and FLAGS are as in
   pp_gimple_stmt_1.  */

static void
dump_gimple_cond (pretty_printer *buffer, gcond *gs, int spc,
		  dump_flags_t flags)
{
  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%s, %T, %T, %T, %T>", gs,
		     get_tree_code_name (gimple_cond_code (gs)),
		     gimple_cond_lhs (gs), gimple_cond_rhs (gs),
		     gimple_cond_true_label (gs), gimple_cond_false_label (gs));
  else
    {
      if (!(flags & TDF_RHS_ONLY))
	pp_string (buffer, "if (");
      dump_generic_node (buffer, gimple_cond_lhs (gs), spc, flags, false);
      pp_space (buffer);
      pp_string (buffer, op_symbol_code (gimple_cond_code (gs)));
      pp_space (buffer);
      dump_generic_node (buffer, gimple_cond_rhs (gs), spc, flags, false);
      if (!(flags & TDF_RHS_ONLY))
	{
	  edge_iterator ei;
	  edge e, true_edge = NULL, false_edge = NULL;
	  basic_block bb = gimple_bb (gs);

	  /* Locate the true and false successor edges so the branch
	     probabilities can be printed alongside the labels.  */
	  if (bb)
	    {
	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  if (e->flags & EDGE_TRUE_VALUE)
		    true_edge = e;
		  else if (e->flags & EDGE_FALSE_VALUE)
		    false_edge = e;
		}
	    }

	  bool has_edge_info = true_edge != NULL && false_edge != NULL;

	  pp_right_paren (buffer);

	  if (gimple_cond_true_label (gs))
	    {
	      pp_string (buffer, " goto ");
	      dump_generic_node (buffer, gimple_cond_true_label (gs),
				 spc, flags, false);
	      if (has_edge_info && !(flags & TDF_GIMPLE))
		dump_edge_probability (buffer, true_edge);
	      pp_semicolon (buffer);
	    }
	  if (gimple_cond_false_label (gs))
	    {
	      pp_string (buffer, " else goto ");
	      dump_generic_node (buffer, gimple_cond_false_label (gs),
				 spc, flags, false);
	      if (has_edge_info && !(flags & TDF_GIMPLE))
		dump_edge_probability (buffer, false_edge);
	      pp_semicolon (buffer);
	    }
	}
    }
}
/* Dump a GIMPLE_LABEL tuple on the pretty_printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see
   TDF_* in dumpfile.h).  */

static void
dump_gimple_label (pretty_printer *buffer, glabel *gs, int spc,
		   dump_flags_t flags)
{
  tree label = gimple_label_label (gs);

  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs, label);
  else
    {
      dump_generic_node (buffer, label, spc, flags, false);
      pp_colon (buffer);
    }

  /* The annotations below are not valid GIMPLE FE syntax.  */
  if (flags & TDF_GIMPLE)
    return;

  if (DECL_NONLOCAL (label))
    pp_string (buffer, " [non-local]");

  if ((flags & TDF_EH) && EH_LANDING_PAD_NR (label))
    pp_printf (buffer, " [LP %d]", EH_LANDING_PAD_NR (label));
}
/* Dump a GIMPLE_GOTO tuple on the pretty_printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see
   TDF_* in dumpfile.h).  */

static void
dump_gimple_goto (pretty_printer *buffer, ggoto *gs, int spc,
		  dump_flags_t flags)
{
  tree dest = gimple_goto_dest (gs);

  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs, dest);
  else
    dump_gimple_fmt (buffer, spc, flags, "goto %T;", dest);
}
/* Dump a GIMPLE_BIND tuple on the pretty_printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see
   TDF_* in dumpfile.h).  */

static void
dump_gimple_bind (pretty_printer *buffer, gbind *gs, int spc,
		  dump_flags_t flags)
{
  bool raw = (flags & TDF_RAW) != 0;

  if (raw)
    dump_gimple_fmt (buffer, spc, flags, "%G <", gs);
  else
    pp_left_brace (buffer);

  /* Print the declarations of the bound variables, unless a slim
     dump was requested.  */
  if (!(flags & TDF_SLIM))
    {
      for (tree var = gimple_bind_vars (gs); var; var = DECL_CHAIN (var))
	{
	  newline_and_indent (buffer, 2);
	  print_declaration (buffer, var, spc, flags);
	}
      if (gimple_bind_vars (gs))
	pp_newline (buffer);
    }

  pp_newline (buffer);
  dump_gimple_seq (buffer, gimple_bind_body (gs), spc + 2, flags);
  newline_and_indent (buffer, spc);

  if (raw)
    pp_greater (buffer);
  else
    pp_right_brace (buffer);
}
/* Dump a GIMPLE_TRY tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */

static void
dump_gimple_try (pretty_printer *buffer, gtry *gs, int spc,
		 dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      const char *type;
      if (gimple_try_kind (gs) == GIMPLE_TRY_CATCH)
	type = "GIMPLE_TRY_CATCH";
      else if (gimple_try_kind (gs) == GIMPLE_TRY_FINALLY)
	type = "GIMPLE_TRY_FINALLY";
      else
	type = "UNKNOWN GIMPLE_TRY";
      dump_gimple_fmt (buffer, spc, flags,
		       "%G <%s,%+EVAL <%S>%nCLEANUP <%S>%->", gs, type,
		       gimple_try_eval (gs), gimple_try_cleanup (gs));
    }
  else
    {
      /* Print the protected body as a braced block...  */
      pp_string (buffer, "try");
      newline_and_indent (buffer, spc + 2);
      pp_left_brace (buffer);
      pp_newline (buffer);
      dump_gimple_seq (buffer, gimple_try_eval (gs), spc + 4, flags);
      newline_and_indent (buffer, spc + 2);
      pp_right_brace (buffer);

      /* ...followed by the handler, introduced by "catch" or
	 "finally" depending on the try kind.  */
      if (gimple_try_kind (gs) == GIMPLE_TRY_CATCH)
	{
	  newline_and_indent (buffer, spc);
	  pp_string (buffer, "catch");
	  newline_and_indent (buffer, spc + 2);
	  pp_left_brace (buffer);
	}
      else if (gimple_try_kind (gs) == GIMPLE_TRY_FINALLY)
	{
	  newline_and_indent (buffer, spc);
	  pp_string (buffer, "finally");
	  newline_and_indent (buffer, spc + 2);
	  pp_left_brace (buffer);
	}
      else
	pp_string (buffer, " <UNKNOWN GIMPLE_TRY> {");

      pp_newline (buffer);
      dump_gimple_seq (buffer, gimple_try_cleanup (gs), spc + 4, flags);
      newline_and_indent (buffer, spc + 2);
      pp_right_brace (buffer);
    }
}
/* Dump a GIMPLE_CATCH tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */

static void
dump_gimple_catch (pretty_printer *buffer, gcatch *gs, int spc,
		   dump_flags_t flags)
{
  tree types = gimple_catch_types (gs);
  gimple_seq handler = gimple_catch_handler (gs);

  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%T, %+CATCH <%S>%->", gs,
		     types, handler);
  else
    dump_gimple_fmt (buffer, spc, flags, "catch (%T)%+{%S}", types, handler);
}
/* Dump a GIMPLE_EH_FILTER tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */

static void
dump_gimple_eh_filter (pretty_printer *buffer, geh_filter *gs, int spc,
		       dump_flags_t flags)
{
  tree types = gimple_eh_filter_types (gs);
  gimple_seq failure = gimple_eh_filter_failure (gs);

  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%T, %+FAILURE <%S>%->", gs,
		     types, failure);
  else
    dump_gimple_fmt (buffer, spc, flags, "<<<eh_filter (%T)>>>%+{%+%S%-}",
		     types, failure);
}
/* Dump a GIMPLE_EH_MUST_NOT_THROW tuple.  */

static void
dump_gimple_eh_must_not_throw (pretty_printer *buffer,
			       geh_mnt *gs, int spc, dump_flags_t flags)
{
  tree fndecl = gimple_eh_must_not_throw_fndecl (gs);

  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs, fndecl);
  else
    dump_gimple_fmt (buffer, spc, flags, "<<<eh_must_not_throw (%T)>>>",
		     fndecl);
}
/* Dump a GIMPLE_EH_ELSE tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */

static void
dump_gimple_eh_else (pretty_printer *buffer, geh_else *gs, int spc,
		     dump_flags_t flags)
{
  gimple_seq n_body = gimple_eh_else_n_body (gs);
  gimple_seq e_body = gimple_eh_else_e_body (gs);

  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags,
		     "%G <%+N_BODY <%S>%nE_BODY <%S>%->", gs,
		     n_body, e_body);
  else
    dump_gimple_fmt (buffer, spc, flags,
		     "<<<if_normal_exit>>>%+{%S}%-<<<else_eh_exit>>>%+{%S}",
		     n_body, e_body);
}
/* Dump a GIMPLE_RESX tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */

static void
dump_gimple_resx (pretty_printer *buffer, gresx *gs, int spc,
		  dump_flags_t flags)
{
  int region = gimple_resx_region (gs);

  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%d>", gs, region);
  else
    dump_gimple_fmt (buffer, spc, flags, "resx %d", region);
}
/* Dump a GIMPLE_EH_DISPATCH tuple on the pretty_printer BUFFER.  */

static void
dump_gimple_eh_dispatch (pretty_printer *buffer, geh_dispatch *gs, int spc,
			 dump_flags_t flags)
{
  int region = gimple_eh_dispatch_region (gs);

  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%d>", gs, region);
  else
    dump_gimple_fmt (buffer, spc, flags, "eh_dispatch %d", region);
}
/* Dump a GIMPLE_DEBUG tuple on the pretty_printer BUFFER, SPC spaces
   of indent.  FLAGS specifies details to show in the dump (see TDF_*
   in dumpfile.h).  */

static void
dump_gimple_debug (pretty_printer *buffer, gdebug *gs, int spc,
		   dump_flags_t flags)
{
  /* The subcode distinguishes the debug statement variants.  */
  switch (gs->subcode)
    {
    case GIMPLE_DEBUG_BIND:
      if (flags & TDF_RAW)
	dump_gimple_fmt (buffer, spc, flags, "%G BIND <%T, %T>", gs,
			 gimple_debug_bind_get_var (gs),
			 gimple_debug_bind_get_value (gs));
      else
	dump_gimple_fmt (buffer, spc, flags, "# DEBUG %T => %T",
			 gimple_debug_bind_get_var (gs),
			 gimple_debug_bind_get_value (gs));
      break;

    case GIMPLE_DEBUG_SOURCE_BIND:
      if (flags & TDF_RAW)
	dump_gimple_fmt (buffer, spc, flags, "%G SRCBIND <%T, %T>", gs,
			 gimple_debug_source_bind_get_var (gs),
			 gimple_debug_source_bind_get_value (gs));
      else
	dump_gimple_fmt (buffer, spc, flags, "# DEBUG %T s=> %T",
			 gimple_debug_source_bind_get_var (gs),
			 gimple_debug_source_bind_get_value (gs));
      break;

    case GIMPLE_DEBUG_BEGIN_STMT:
      if (flags & TDF_RAW)
	dump_gimple_fmt (buffer, spc, flags, "%G BEGIN_STMT", gs);
      else
	dump_gimple_fmt (buffer, spc, flags, "# DEBUG BEGIN_STMT");
      break;

    case GIMPLE_DEBUG_INLINE_ENTRY:
      /* Identify the inlined function by the ultimate origin of the
	 statement's block, when a block is present.  */
      if (flags & TDF_RAW)
	dump_gimple_fmt (buffer, spc, flags, "%G INLINE_ENTRY %T", gs,
			 gimple_block (gs)
			 ? block_ultimate_origin (gimple_block (gs))
			 : NULL_TREE);
      else
	dump_gimple_fmt (buffer, spc, flags, "# DEBUG INLINE_ENTRY %T",
			 gimple_block (gs)
			 ? block_ultimate_origin (gimple_block (gs))
			 : NULL_TREE);
      break;

    default:
      gcc_unreachable ();
    }
}
/* Dump a GIMPLE_OMP_FOR tuple on the pretty_printer BUFFER.  */

static void
dump_gimple_omp_for (pretty_printer *buffer, gomp_for *gs, int spc,
		     dump_flags_t flags)
{
  size_t i;

  if (flags & TDF_RAW)
    {
      const char *kind;
      switch (gimple_omp_for_kind (gs))
	{
	case GF_OMP_FOR_KIND_FOR:
	  kind = "";
	  break;
	case GF_OMP_FOR_KIND_DISTRIBUTE:
	  kind = " distribute";
	  break;
	case GF_OMP_FOR_KIND_TASKLOOP:
	  kind = " taskloop";
	  break;
	case GF_OMP_FOR_KIND_OACC_LOOP:
	  kind = " oacc_loop";
	  break;
	case GF_OMP_FOR_KIND_SIMD:
	  kind = " simd";
	  break;
	default:
	  gcc_unreachable ();
	}
      dump_gimple_fmt (buffer, spc, flags, "%G%s <%+BODY <%S>%nCLAUSES <", gs,
		       kind, gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_for_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >,");
      /* One index/initial/final/cond/increment tuple per collapsed
	 loop dimension.  */
      for (i = 0; i < gimple_omp_for_collapse (gs); i++)
	dump_gimple_fmt (buffer, spc, flags,
			 "%+%T, %T, %T, %s, %T,%n",
			 gimple_omp_for_index (gs, i),
			 gimple_omp_for_initial (gs, i),
			 gimple_omp_for_final (gs, i),
			 get_tree_code_name (gimple_omp_for_cond (gs, i)),
			 gimple_omp_for_incr (gs, i));
      dump_gimple_fmt (buffer, spc, flags, "PRE_BODY <%S>%->",
		       gimple_omp_for_pre_body (gs));
    }
  else
    {
      /* Select the pragma matching the loop kind.  */
      switch (gimple_omp_for_kind (gs))
	{
	case GF_OMP_FOR_KIND_FOR:
	  pp_string (buffer, "#pragma omp for");
	  break;
	case GF_OMP_FOR_KIND_DISTRIBUTE:
	  pp_string (buffer, "#pragma omp distribute");
	  break;
	case GF_OMP_FOR_KIND_TASKLOOP:
	  pp_string (buffer, "#pragma omp taskloop");
	  break;
	case GF_OMP_FOR_KIND_OACC_LOOP:
	  pp_string (buffer, "#pragma acc loop");
	  break;
	case GF_OMP_FOR_KIND_SIMD:
	  pp_string (buffer, "#pragma omp simd");
	  break;
	case GF_OMP_FOR_KIND_GRID_LOOP:
	  pp_string (buffer, "#pragma omp for grid_loop");
	  break;
	default:
	  gcc_unreachable ();
	}
      dump_omp_clauses (buffer, gimple_omp_for_clauses (gs), spc, flags);
      /* Print one C-style "for (...)" header per collapsed dimension,
	 indenting nested dimensions progressively.  */
      for (i = 0; i < gimple_omp_for_collapse (gs); i++)
	{
	  if (i)
	    spc += 2;
	  newline_and_indent (buffer, spc);
	  pp_string (buffer, "for (");
	  dump_generic_node (buffer, gimple_omp_for_index (gs, i), spc,
			     flags, false);
	  pp_string (buffer, " = ");
	  dump_generic_node (buffer, gimple_omp_for_initial (gs, i), spc,
			     flags, false);
	  pp_string (buffer, "; ");

	  dump_generic_node (buffer, gimple_omp_for_index (gs, i), spc,
			     flags, false);
	  pp_space (buffer);
	  switch (gimple_omp_for_cond (gs, i))
	    {
	    case LT_EXPR:
	      pp_less (buffer);
	      break;
	    case GT_EXPR:
	      pp_greater (buffer);
	      break;
	    case LE_EXPR:
	      pp_less_equal (buffer);
	      break;
	    case GE_EXPR:
	      pp_greater_equal (buffer);
	      break;
	    case NE_EXPR:
	      pp_string (buffer, "!=");
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  pp_space (buffer);
	  dump_generic_node (buffer, gimple_omp_for_final (gs, i), spc,
			     flags, false);
	  pp_string (buffer, "; ");

	  dump_generic_node (buffer, gimple_omp_for_index (gs, i), spc,
			     flags, false);
	  pp_string (buffer, " = ");
	  dump_generic_node (buffer, gimple_omp_for_incr (gs, i), spc,
			     flags, false);
	  pp_right_paren (buffer);
	}

      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_left_brace (buffer);
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
	  newline_and_indent (buffer, spc + 2);
	  pp_right_brace (buffer);
	}
    }
}
/* Dump a GIMPLE_OMP_CONTINUE tuple on the pretty_printer BUFFER.  */

static void
dump_gimple_omp_continue (pretty_printer *buffer, gomp_continue *gs,
			  int spc, dump_flags_t flags)
{
  tree ctrl_def = gimple_omp_continue_control_def (gs);
  tree ctrl_use = gimple_omp_continue_control_use (gs);

  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T>", gs,
		       ctrl_def, ctrl_use);
    }
  else
    {
      pp_string (buffer, "#pragma omp continue (");
      dump_generic_node (buffer, ctrl_def, spc, flags, false);
      pp_comma (buffer);
      pp_space (buffer);
      dump_generic_node (buffer, ctrl_use, spc, flags, false);
      pp_right_paren (buffer);
    }
}
/* Dump a GIMPLE_OMP_SINGLE tuple on the pretty_printer BUFFER.
   SPC is the indentation level; FLAGS selects raw vs. pseudo-source.  */

static void
dump_gimple_omp_single (pretty_printer *buffer, gomp_single *gs,
			int spc, dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      /* Raw tuple form: the body statements followed by the clause list.  */
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
		       gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_single_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >");
    }
  else
    {
      /* Pseudo-source form: the pragma plus clauses, then the body (if
	 any) as a brace-enclosed, indented statement sequence.  */
      pp_string (buffer, "#pragma omp single");
      dump_omp_clauses (buffer, gimple_omp_single_clauses (gs), spc, flags);
      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_left_brace (buffer);
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
	  newline_and_indent (buffer, spc + 2);
	  pp_right_brace (buffer);
	}
    }
}
/* Dump a GIMPLE_OMP_TARGET tuple on the pretty_printer BUFFER.
   Handles both OpenMP target constructs and OpenACC constructs, which
   share the GIMPLE_OMP_TARGET statement code.  */

static void
dump_gimple_omp_target (pretty_printer *buffer, gomp_target *gs,
			int spc, dump_flags_t flags)
{
  /* Suffix appended to "#pragma omp target", selected by the target
     kind; "" for a plain target region.  */
  const char *kind;
  switch (gimple_omp_target_kind (gs))
    {
    case GF_OMP_TARGET_KIND_REGION:
      kind = "";
      break;
    case GF_OMP_TARGET_KIND_DATA:
      kind = " data";
      break;
    case GF_OMP_TARGET_KIND_UPDATE:
      kind = " update";
      break;
    case GF_OMP_TARGET_KIND_ENTER_DATA:
      kind = " enter data";
      break;
    case GF_OMP_TARGET_KIND_EXIT_DATA:
      kind = " exit data";
      break;
    case GF_OMP_TARGET_KIND_OACC_KERNELS:
      kind = " oacc_kernels";
      break;
    case GF_OMP_TARGET_KIND_OACC_PARALLEL:
      kind = " oacc_parallel";
      break;
    case GF_OMP_TARGET_KIND_OACC_DATA:
      kind = " oacc_data";
      break;
    case GF_OMP_TARGET_KIND_OACC_UPDATE:
      kind = " oacc_update";
      break;
    case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
      kind = " oacc_enter_exit_data";
      break;
    case GF_OMP_TARGET_KIND_OACC_DECLARE:
      kind = " oacc_declare";
      break;
    case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
      kind = " oacc_host_data";
      break;
    default:
      gcc_unreachable ();
    }
  if (flags & TDF_RAW)
    {
      /* Raw tuple form: body, clauses, then the outlined child function
	 and its data argument.  */
      dump_gimple_fmt (buffer, spc, flags, "%G%s <%+BODY <%S>%nCLAUSES <", gs,
		       kind, gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_target_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >, %T, %T%n>",
		       gimple_omp_target_child_fn (gs),
		       gimple_omp_target_data_arg (gs));
    }
  else
    {
      pp_string (buffer, "#pragma omp target");
      pp_string (buffer, kind);
      dump_omp_clauses (buffer, gimple_omp_target_clauses (gs), spc, flags);
      /* After outlining, show the child function and the argument block
	 it receives ("???" if there is no data argument).  */
      if (gimple_omp_target_child_fn (gs))
	{
	  pp_string (buffer, " [child fn: ");
	  dump_generic_node (buffer, gimple_omp_target_child_fn (gs),
			     spc, flags, false);
	  pp_string (buffer, " (");
	  if (gimple_omp_target_data_arg (gs))
	    dump_generic_node (buffer, gimple_omp_target_data_arg (gs),
			       spc, flags, false);
	  else
	    pp_string (buffer, "???");
	  pp_string (buffer, ")]");
	}
      gimple_seq body = gimple_omp_body (gs);
      /* If the body does not start with a GIMPLE_BIND, add our own braces;
	 otherwise the bind supplies its own and we just indent.  */
      if (body && gimple_code (gimple_seq_first_stmt (body)) != GIMPLE_BIND)
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_left_brace (buffer);
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, body, spc + 4, flags);
	  newline_and_indent (buffer, spc + 2);
	  pp_right_brace (buffer);
	}
      else if (body)
	{
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, body, spc + 2, flags);
	}
    }
}
/* Dump a GIMPLE_OMP_TEAMS tuple on the pretty_printer BUFFER.
   SPC is the indentation level; FLAGS selects raw vs. pseudo-source.  */

static void
dump_gimple_omp_teams (pretty_printer *buffer, gomp_teams *gs, int spc,
		       dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      /* Raw tuple form: the body statements followed by the clause list.  */
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
		       gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_teams_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >");
    }
  else
    {
      pp_string (buffer, "#pragma omp teams");
      dump_omp_clauses (buffer, gimple_omp_teams_clauses (gs), spc, flags);
      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
	{
	  newline_and_indent (buffer, spc + 2);
	  /* Use pp_left_brace/pp_right_brace for consistency with every
	     other OMP dumper (was pp_character with '{' and '}').  */
	  pp_left_brace (buffer);
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
	  newline_and_indent (buffer, spc + 2);
	  pp_right_brace (buffer);
	}
    }
}
/* Dump a GIMPLE_OMP_SECTIONS tuple on the pretty_printer BUFFER.
   SPC is the indentation level; FLAGS selects raw vs. pseudo-source.  */

static void
dump_gimple_omp_sections (pretty_printer *buffer, gomp_sections *gs,
			  int spc, dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      /* Raw tuple form: the body statements followed by the clause list.  */
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
		       gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_sections_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >");
    }
  else
    {
      pp_string (buffer, "#pragma omp sections");
      /* The control variable is created during lowering; print it in
	 angle brackets when present.  */
      if (gimple_omp_sections_control (gs))
	{
	  pp_string (buffer, " <");
	  dump_generic_node (buffer, gimple_omp_sections_control (gs), spc,
			     flags, false);
	  pp_greater (buffer);
	}
      dump_omp_clauses (buffer, gimple_omp_sections_clauses (gs), spc, flags);
      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_left_brace (buffer);
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
	  newline_and_indent (buffer, spc + 2);
	  pp_right_brace (buffer);
	}
    }
}
/* Dump a GIMPLE_OMP_{MASTER,TASKGROUP,ORDERED,SECTION} tuple on the
   pretty_printer BUFFER.  These clause-less block constructs share one
   dumper; only the pragma text differs per statement code.  */

static void
dump_gimple_omp_block (pretty_printer *buffer, gimple *gs, int spc,
		       dump_flags_t flags)
{
  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S> >", gs,
		     gimple_omp_body (gs));
  else
    {
      /* Choose the pragma text from the statement code.  */
      switch (gimple_code (gs))
	{
	case GIMPLE_OMP_MASTER:
	  pp_string (buffer, "#pragma omp master");
	  break;
	case GIMPLE_OMP_TASKGROUP:
	  pp_string (buffer, "#pragma omp taskgroup");
	  break;
	case GIMPLE_OMP_SECTION:
	  pp_string (buffer, "#pragma omp section");
	  break;
	case GIMPLE_OMP_GRID_BODY:
	  pp_string (buffer, "#pragma omp gridified body");
	  break;
	default:
	  gcc_unreachable ();
	}
      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_left_brace (buffer);
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
	  newline_and_indent (buffer, spc + 2);
	  pp_right_brace (buffer);
	}
    }
}
/* Dump a GIMPLE_OMP_CRITICAL tuple on the pretty_printer BUFFER.
   SPC is the indentation level; FLAGS selects raw vs. pseudo-source.  */

static void
dump_gimple_omp_critical (pretty_printer *buffer, gomp_critical *gs,
			  int spc, dump_flags_t flags)
{
  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S> >", gs,
		     gimple_omp_body (gs));
  else
    {
      pp_string (buffer, "#pragma omp critical");
      /* An optional name distinguishes critical regions; print it in
	 parentheses when present.  */
      if (gimple_omp_critical_name (gs))
	{
	  pp_string (buffer, " (");
	  dump_generic_node (buffer, gimple_omp_critical_name (gs), spc,
			     flags, false);
	  pp_right_paren (buffer);
	}
      dump_omp_clauses (buffer, gimple_omp_critical_clauses (gs), spc, flags);
      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_left_brace (buffer);
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
	  newline_and_indent (buffer, spc + 2);
	  pp_right_brace (buffer);
	}
    }
}
/* Dump a GIMPLE_OMP_ORDERED tuple on the pretty_printer BUFFER.
   SPC is the indentation level; FLAGS selects raw vs. pseudo-source.  */

static void
dump_gimple_omp_ordered (pretty_printer *buffer, gomp_ordered *gs,
			 int spc, dump_flags_t flags)
{
  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S> >", gs,
		     gimple_omp_body (gs));
  else
    {
      pp_string (buffer, "#pragma omp ordered");
      dump_omp_clauses (buffer, gimple_omp_ordered_clauses (gs), spc, flags);
      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_left_brace (buffer);
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
	  newline_and_indent (buffer, spc + 2);
	  pp_right_brace (buffer);
	}
    }
}
/* Dump a GIMPLE_OMP_RETURN tuple on the pretty_printer BUFFER.
   Prints the nowait flag and, when present, the LHS set by the
   return (used e.g. for the result of a cancellation check).  */

static void
dump_gimple_omp_return (pretty_printer *buffer, gimple *gs, int spc,
			dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <nowait=%d", gs,
		       (int) gimple_omp_return_nowait_p (gs));
      if (gimple_omp_return_lhs (gs))
	dump_gimple_fmt (buffer, spc, flags, ", lhs=%T>",
			 gimple_omp_return_lhs (gs));
      else
	dump_gimple_fmt (buffer, spc, flags, ">");
    }
  else
    {
      pp_string (buffer, "#pragma omp return");
      if (gimple_omp_return_nowait_p (gs))
	pp_string (buffer, "(nowait)");
      if (gimple_omp_return_lhs (gs))
	{
	  pp_string (buffer, " (set ");
	  dump_generic_node (buffer, gimple_omp_return_lhs (gs),
			     spc, flags, false);
	  /* Use pp_right_paren for consistency with the rest of the
	     file (was pp_character with ')').  */
	  pp_right_paren (buffer);
	}
    }
}
/* Dump a GIMPLE_TRANSACTION tuple on the pretty_printer BUFFER.
   Prints the transaction variant keyword, then either the body or
   (after the body has been lowered away) the continuation labels and
   any remaining subcode flags as a comment.  */

static void
dump_gimple_transaction (pretty_printer *buffer, gtransaction *gs,
			 int spc, dump_flags_t flags)
{
  unsigned subcode = gimple_transaction_subcode (gs);

  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags,
		       "%G [SUBCODE=%x,NORM=%T,UNINST=%T,OVER=%T] "
		       "<%+BODY <%S> >",
		       gs, subcode, gimple_transaction_label_norm (gs),
		       gimple_transaction_label_uninst (gs),
		       gimple_transaction_label_over (gs),
		       gimple_transaction_body (gs));
    }
  else
    {
      if (subcode & GTMA_IS_OUTER)
	pp_string (buffer, "__transaction_atomic [[outer]]");
      else if (subcode & GTMA_IS_RELAXED)
	pp_string (buffer, "__transaction_relaxed");
      else
	pp_string (buffer, "__transaction_atomic");
      /* The declaration bits were just printed above; only analysis
	 flags remain interesting below.  */
      subcode &= ~GTMA_DECLARATION_MASK;

      if (gimple_transaction_body (gs))
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_left_brace (buffer);
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, gimple_transaction_body (gs),
			   spc + 4, flags);
	  newline_and_indent (buffer, spc + 2);
	  pp_right_brace (buffer);
	}
      else
	{
	  /* Lowered form: no body, so print the edge labels and the
	     subcode flags as a trailing comment.  */
	  pp_string (buffer, " //");
	  if (gimple_transaction_label_norm (gs))
	    {
	      pp_string (buffer, " NORM=");
	      dump_generic_node (buffer, gimple_transaction_label_norm (gs),
				 spc, flags, false);
	    }
	  if (gimple_transaction_label_uninst (gs))
	    {
	      pp_string (buffer, " UNINST=");
	      dump_generic_node (buffer, gimple_transaction_label_uninst (gs),
				 spc, flags, false);
	    }
	  if (gimple_transaction_label_over (gs))
	    {
	      pp_string (buffer, " OVER=");
	      dump_generic_node (buffer, gimple_transaction_label_over (gs),
				 spc, flags, false);
	    }
	  if (subcode)
	    {
	      /* Print each known flag by name, clearing it as we go so
		 that any leftover unknown bits can be shown in hex.  */
	      pp_string (buffer, " SUBCODE=[ ");
	      if (subcode & GTMA_HAVE_ABORT)
		{
		  pp_string (buffer, "GTMA_HAVE_ABORT ");
		  subcode &= ~GTMA_HAVE_ABORT;
		}
	      if (subcode & GTMA_HAVE_LOAD)
		{
		  pp_string (buffer, "GTMA_HAVE_LOAD ");
		  subcode &= ~GTMA_HAVE_LOAD;
		}
	      if (subcode & GTMA_HAVE_STORE)
		{
		  pp_string (buffer, "GTMA_HAVE_STORE ");
		  subcode &= ~GTMA_HAVE_STORE;
		}
	      if (subcode & GTMA_MAY_ENTER_IRREVOCABLE)
		{
		  pp_string (buffer, "GTMA_MAY_ENTER_IRREVOCABLE ");
		  subcode &= ~GTMA_MAY_ENTER_IRREVOCABLE;
		}
	      if (subcode & GTMA_DOES_GO_IRREVOCABLE)
		{
		  pp_string (buffer, "GTMA_DOES_GO_IRREVOCABLE ");
		  subcode &= ~GTMA_DOES_GO_IRREVOCABLE;
		}
	      if (subcode & GTMA_HAS_NO_INSTRUMENTATION)
		{
		  pp_string (buffer, "GTMA_HAS_NO_INSTRUMENTATION ");
		  subcode &= ~GTMA_HAS_NO_INSTRUMENTATION;
		}
	      if (subcode)
		pp_printf (buffer, "0x%x ", subcode);
	      pp_right_bracket (buffer);
	    }
	}
    }
}
/* Dump a GIMPLE_ASM tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  The non-raw form reconstructs extended-asm syntax:
   __asm__ [__volatile__] [goto] ("...") : outputs : inputs : clobbers
   [: labels].  */

static void
dump_gimple_asm (pretty_printer *buffer, gasm *gs, int spc, dump_flags_t flags)
{
  unsigned int i, n, f, fields;

  if (flags & TDF_RAW)
    {
      /* Raw form: the template string followed by one labeled section
	 per non-empty operand class.  */
      dump_gimple_fmt (buffer, spc, flags, "%G <%+STRING <%n%s%n>", gs,
		       gimple_asm_string (gs));

      n = gimple_asm_noutputs (gs);
      if (n)
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_string (buffer, "OUTPUT: ");
	  for (i = 0; i < n; i++)
	    {
	      dump_generic_node (buffer, gimple_asm_output_op (gs, i),
				 spc, flags, false);
	      if (i < n - 1)
		pp_string (buffer, ", ");
	    }
	}

      n = gimple_asm_ninputs (gs);
      if (n)
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_string (buffer, "INPUT: ");
	  for (i = 0; i < n; i++)
	    {
	      dump_generic_node (buffer, gimple_asm_input_op (gs, i),
				 spc, flags, false);
	      if (i < n - 1)
		pp_string (buffer, ", ");
	    }
	}

      n = gimple_asm_nclobbers (gs);
      if (n)
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_string (buffer, "CLOBBER: ");
	  for (i = 0; i < n; i++)
	    {
	      dump_generic_node (buffer, gimple_asm_clobber_op (gs, i),
				 spc, flags, false);
	      if (i < n - 1)
		pp_string (buffer, ", ");
	    }
	}

      n = gimple_asm_nlabels (gs);
      if (n)
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_string (buffer, "LABEL: ");
	  for (i = 0; i < n; i++)
	    {
	      dump_generic_node (buffer, gimple_asm_label_op (gs, i),
				 spc, flags, false);
	      if (i < n - 1)
		pp_string (buffer, ", ");
	    }
	}

      newline_and_indent (buffer, spc);
      pp_greater (buffer);
    }
  else
    {
      pp_string (buffer, "__asm__");
      if (gimple_asm_volatile_p (gs))
	pp_string (buffer, " __volatile__");
      if (gimple_asm_nlabels (gs))
	pp_string (buffer, " goto");
      pp_string (buffer, "(\"");
      pp_string (buffer, gimple_asm_string (gs));
      pp_string (buffer, "\"");

      /* The ':'-separated operand sections are positional, so we must
	 emit every section up to and including the last non-empty one,
	 even if earlier sections are empty.  FIELDS counts how many.  */
      if (gimple_asm_nlabels (gs))
	fields = 4;
      else if (gimple_asm_nclobbers (gs))
	fields = 3;
      else if (gimple_asm_ninputs (gs))
	fields = 2;
      else if (gimple_asm_noutputs (gs))
	fields = 1;
      else
	fields = 0;

      for (f = 0; f < fields; ++f)
	{
	  pp_string (buffer, " : ");

	  switch (f)
	    {
	    case 0:
	      n = gimple_asm_noutputs (gs);
	      for (i = 0; i < n; i++)
		{
		  dump_generic_node (buffer, gimple_asm_output_op (gs, i),
				     spc, flags, false);
		  if (i < n - 1)
		    pp_string (buffer, ", ");
		}
	      break;

	    case 1:
	      n = gimple_asm_ninputs (gs);
	      for (i = 0; i < n; i++)
		{
		  dump_generic_node (buffer, gimple_asm_input_op (gs, i),
				     spc, flags, false);
		  if (i < n - 1)
		    pp_string (buffer, ", ");
		}
	      break;

	    case 2:
	      n = gimple_asm_nclobbers (gs);
	      for (i = 0; i < n; i++)
		{
		  dump_generic_node (buffer, gimple_asm_clobber_op (gs, i),
				     spc, flags, false);
		  if (i < n - 1)
		    pp_string (buffer, ", ");
		}
	      break;

	    case 3:
	      n = gimple_asm_nlabels (gs);
	      for (i = 0; i < n; i++)
		{
		  dump_generic_node (buffer, gimple_asm_label_op (gs, i),
				     spc, flags, false);
		  if (i < n - 1)
		    pp_string (buffer, ", ");
		}
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}

      pp_string (buffer, ");");
    }
}
/* Dump ptr_info and range_info for NODE on pretty_printer BUFFER with
   SPC spaces of indent.  For pointers this is the points-to solution
   and any known alignment; for integral SSA names it is the value
   range and known nonzero bits.  No-op for non-SSA_NAME trees.  */

static void
dump_ssaname_info (pretty_printer *buffer, tree node, int spc)
{
  if (TREE_CODE (node) != SSA_NAME)
    return;

  if (POINTER_TYPE_P (TREE_TYPE (node))
      && SSA_NAME_PTR_INFO (node))
    {
      unsigned int align, misalign;
      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (node);
      pp_string (buffer, "# PT = ");
      pp_points_to_solution (buffer, &pi->pt);
      newline_and_indent (buffer, spc);
      /* Alignment info is only printed when actually recorded.  */
      if (get_ptr_info_alignment (pi, &align, &misalign))
	{
	  pp_printf (buffer, "# ALIGN = %u, MISALIGN = %u", align, misalign);
	  newline_and_indent (buffer, spc);
	}
    }

  if (!POINTER_TYPE_P (TREE_TYPE (node))
      && SSA_NAME_RANGE_INFO (node))
    {
      wide_int min, max, nonzero_bits;
      value_range_type range_type = get_range_info (node, &min, &max);

      if (range_type == VR_VARYING)
	pp_printf (buffer, "# RANGE VR_VARYING");
      else if (range_type == VR_RANGE || range_type == VR_ANTI_RANGE)
	{
	  /* Anti-ranges are printed with a leading '~'.  */
	  pp_printf (buffer, "# RANGE ");
	  pp_printf (buffer, "%s[", range_type == VR_RANGE ? "" : "~");
	  pp_wide_int (buffer, min, TYPE_SIGN (TREE_TYPE (node)));
	  pp_printf (buffer, ", ");
	  pp_wide_int (buffer, max, TYPE_SIGN (TREE_TYPE (node)));
	  pp_printf (buffer, "]");
	}
      nonzero_bits = get_nonzero_bits (node);
      /* -1 (all bits set) carries no information, so skip it.  */
      if (nonzero_bits != -1)
	{
	  pp_string (buffer, " NONZERO ");
	  pp_wide_int (buffer, nonzero_bits, UNSIGNED);
	}
      newline_and_indent (buffer, spc);
    }
}
/* Like dump_ssaname_info, but write the result directly to FILE.  */

void
dump_ssaname_info_to_file (FILE *file, tree node, int spc)
{
  pretty_printer pp;
  pp_needs_newline (&pp) = true;
  pp.buffer->stream = file;
  dump_ssaname_info (&pp, node, spc);
  pp_flush (&pp);
}
/* Dump a PHI node PHI.  BUFFER, SPC and FLAGS are as in pp_gimple_stmt_1.
   The caller is responsible for calling pp_flush on BUFFER to finalize
   pretty printer.  If COMMENT is true, print this after #.  */

static void
dump_gimple_phi (pretty_printer *buffer, gphi *phi, int spc, bool comment,
		 dump_flags_t flags)
{
  size_t i;
  tree lhs = gimple_phi_result (phi);

  if (flags & TDF_ALIAS)
    dump_ssaname_info (buffer, lhs, spc);

  if (comment)
    pp_string (buffer, "# ");

  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%T, ", phi,
		     gimple_phi_result (phi));
  else
    {
      dump_generic_node (buffer, lhs, spc, flags, false);
      /* TDF_GIMPLE emits parseable "__PHI (...)" syntax; the default
	 form is the classic "PHI <...>" notation.  */
      if (flags & TDF_GIMPLE)
	pp_string (buffer, " = __PHI (");
      else
	pp_string (buffer, " = PHI <");
    }
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      if ((flags & TDF_LINENO) && gimple_phi_arg_has_location (phi, i))
	dump_location (buffer, gimple_phi_arg_location (phi, i));
      if (flags & TDF_GIMPLE)
	{
	  /* For TDF_GIMPLE, prefix each argument with its source block:
	     the block's label if it starts with one, else "bb_N".  */
	  basic_block src = gimple_phi_arg_edge (phi, i)->src;
	  gimple *stmt = first_stmt (src);
	  if (!stmt || gimple_code (stmt) != GIMPLE_LABEL)
	    {
	      pp_string (buffer, "bb_");
	      pp_decimal_int (buffer, src->index);
	    }
	  else
	    dump_generic_node (buffer, gimple_label_label (as_a <glabel *> (stmt)), 0, flags,
			       false);
	  pp_string (buffer, ": ");
	}
      dump_generic_node (buffer, gimple_phi_arg_def (phi, i), spc, flags,
			 false);
      /* Default form annotates each argument with "(src-bb-index)".  */
      if (! (flags & TDF_GIMPLE))
	{
	  pp_left_paren (buffer);
	  pp_decimal_int (buffer, gimple_phi_arg_edge (phi, i)->src->index);
	  pp_right_paren (buffer);
	}
      if (i < gimple_phi_num_args (phi) - 1)
	pp_string (buffer, ", ");
    }
  if (flags & TDF_GIMPLE)
    pp_string (buffer, ");");
  else
    pp_greater (buffer);
}
/* Dump a GIMPLE_OMP_PARALLEL tuple on the pretty_printer BUFFER, SPC spaces
   of indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */

static void
dump_gimple_omp_parallel (pretty_printer *buffer, gomp_parallel *gs,
			  int spc, dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      /* Raw tuple form: body, clauses, outlined child function and its
	 data argument.  */
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
		       gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_parallel_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >, %T, %T%n>",
		       gimple_omp_parallel_child_fn (gs),
		       gimple_omp_parallel_data_arg (gs));
    }
  else
    {
      gimple_seq body;
      pp_string (buffer, "#pragma omp parallel");
      dump_omp_clauses (buffer, gimple_omp_parallel_clauses (gs), spc, flags);
      /* After outlining, show the child function and the argument block
	 it receives ("???" if there is no data argument).  */
      if (gimple_omp_parallel_child_fn (gs))
	{
	  pp_string (buffer, " [child fn: ");
	  dump_generic_node (buffer, gimple_omp_parallel_child_fn (gs),
			     spc, flags, false);
	  pp_string (buffer, " (");
	  if (gimple_omp_parallel_data_arg (gs))
	    dump_generic_node (buffer, gimple_omp_parallel_data_arg (gs),
			       spc, flags, false);
	  else
	    pp_string (buffer, "???");
	  pp_string (buffer, ")]");
	}
      body = gimple_omp_body (gs);
      /* If the body does not start with a GIMPLE_BIND, add our own braces;
	 otherwise the bind supplies its own and we just indent.  */
      if (body && gimple_code (gimple_seq_first_stmt (body)) != GIMPLE_BIND)
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_left_brace (buffer);
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, body, spc + 4, flags);
	  newline_and_indent (buffer, spc + 2);
	  pp_right_brace (buffer);
	}
      else if (body)
	{
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, body, spc + 2, flags);
	}
    }
}
/* Dump a GIMPLE_OMP_TASK tuple on the pretty_printer BUFFER, SPC spaces
   of indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */

static void
dump_gimple_omp_task (pretty_printer *buffer, gomp_task *gs, int spc,
		      dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      /* Raw tuple form: body, clauses, then child fn, data arg, copy fn,
	 argument-block size and alignment.  */
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
		       gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_task_clauses (gs), spc, flags);
      /* The fifth operand is the argument-block alignment; the original
	 code printed arg_size twice (copy-paste error).  */
      dump_gimple_fmt (buffer, spc, flags, " >, %T, %T, %T, %T, %T%n>",
		       gimple_omp_task_child_fn (gs),
		       gimple_omp_task_data_arg (gs),
		       gimple_omp_task_copy_fn (gs),
		       gimple_omp_task_arg_size (gs),
		       gimple_omp_task_arg_align (gs));
    }
  else
    {
      gimple_seq body;
      /* GIMPLE_OMP_TASK also represents taskloop constructs.  */
      if (gimple_omp_task_taskloop_p (gs))
	pp_string (buffer, "#pragma omp taskloop");
      else
	pp_string (buffer, "#pragma omp task");
      dump_omp_clauses (buffer, gimple_omp_task_clauses (gs), spc, flags);
      /* After outlining, show the child function and the argument block
	 it receives ("???" if there is no data argument).  */
      if (gimple_omp_task_child_fn (gs))
	{
	  pp_string (buffer, " [child fn: ");
	  dump_generic_node (buffer, gimple_omp_task_child_fn (gs),
			     spc, flags, false);
	  pp_string (buffer, " (");
	  if (gimple_omp_task_data_arg (gs))
	    dump_generic_node (buffer, gimple_omp_task_data_arg (gs),
			       spc, flags, false);
	  else
	    pp_string (buffer, "???");
	  pp_string (buffer, ")]");
	}
      body = gimple_omp_body (gs);
      /* If the body does not start with a GIMPLE_BIND, add our own braces;
	 otherwise the bind supplies its own and we just indent.  */
      if (body && gimple_code (gimple_seq_first_stmt (body)) != GIMPLE_BIND)
	{
	  newline_and_indent (buffer, spc + 2);
	  pp_left_brace (buffer);
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, body, spc + 4, flags);
	  newline_and_indent (buffer, spc + 2);
	  pp_right_brace (buffer);
	}
      else if (body)
	{
	  pp_newline (buffer);
	  dump_gimple_seq (buffer, body, spc + 2, flags);
	}
    }
}
/* Dump a GIMPLE_OMP_ATOMIC_LOAD tuple on the pretty_printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see TDF_*
   in dumpfile.h).  Non-raw form prints "lhs = *rhs" on the next line.  */

static void
dump_gimple_omp_atomic_load (pretty_printer *buffer, gomp_atomic_load *gs,
			     int spc, dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T>", gs,
		       gimple_omp_atomic_load_lhs (gs),
		       gimple_omp_atomic_load_rhs (gs));
    }
  else
    {
      pp_string (buffer, "#pragma omp atomic_load");
      if (gimple_omp_atomic_seq_cst_p (gs))
	pp_string (buffer, " seq_cst");
      /* "[needed]" marks loads whose value is actually consumed.  */
      if (gimple_omp_atomic_need_value_p (gs))
	pp_string (buffer, " [needed]");
      newline_and_indent (buffer, spc + 2);
      dump_generic_node (buffer, gimple_omp_atomic_load_lhs (gs),
			 spc, flags, false);
      pp_space (buffer);
      pp_equal (buffer);
      pp_space (buffer);
      /* The RHS is an address; the load dereferences it, hence '*'.  */
      pp_star (buffer);
      dump_generic_node (buffer, gimple_omp_atomic_load_rhs (gs),
			 spc, flags, false);
    }
}
/* Dump a GIMPLE_OMP_ATOMIC_STORE tuple on the pretty_printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see TDF_*
   in dumpfile.h).  */

static void
dump_gimple_omp_atomic_store (pretty_printer *buffer,
			      gomp_atomic_store *gs, int spc,
			      dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs,
		       gimple_omp_atomic_store_val (gs));
    }
  else
    {
      pp_string (buffer, "#pragma omp atomic_store ");
      if (gimple_omp_atomic_seq_cst_p (gs))
	pp_string (buffer, "seq_cst ");
      /* "[needed]" marks stores whose value is also consumed.  */
      if (gimple_omp_atomic_need_value_p (gs))
	pp_string (buffer, "[needed] ");
      pp_left_paren (buffer);
      dump_generic_node (buffer, gimple_omp_atomic_store_val (gs),
			 spc, flags, false);
      pp_right_paren (buffer);
    }
}
/* Print the virtual memory operands of statement GS to BUFFER: either
   "# VAR = VDEF <VAR>" when the statement defines virtual memory, or
   "# VUSE <VAR>" when it only uses it.  BUFFER, SPC and FLAGS are as
   in pp_gimple_stmt_1.  */

static void
dump_gimple_mem_ops (pretty_printer *buffer, gimple *gs, int spc,
		     dump_flags_t flags)
{
  tree vdef_op = gimple_vdef (gs);
  tree vuse_op = gimple_vuse (gs);

  /* Nothing to print for statements without virtual operands.  */
  if (vdef_op == NULL_TREE && vuse_op == NULL_TREE)
    return;

  if (vdef_op != NULL_TREE)
    {
      pp_string (buffer, "# ");
      dump_generic_node (buffer, vdef_op, spc + 2, flags, false);
      pp_string (buffer, " = VDEF <");
      dump_generic_node (buffer, vuse_op, spc + 2, flags, false);
    }
  else
    {
      pp_string (buffer, "# VUSE <");
      dump_generic_node (buffer, vuse_op, spc + 2, flags, false);
    }

  /* Both forms share the closing '>' and the newline.  */
  pp_greater (buffer);
  newline_and_indent (buffer, spc);
}
/* Print the gimple statement GS on the pretty printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see
   TDF_* in dumpfile.h).  The caller is responsible for calling
   pp_flush on BUFFER to finalize the pretty printer.

   This is the central dispatcher: it first emits the optional per-flag
   prefixes (statement address, location, EH region, virtual operands,
   alias info), then forwards to the code-specific dumper.  */

void
pp_gimple_stmt_1 (pretty_printer *buffer, gimple *gs, int spc,
		  dump_flags_t flags)
{
  if (!gs)
    return;

  if (flags & TDF_STMTADDR)
    pp_printf (buffer, "<&%p> ", (void *) gs);

  if ((flags & TDF_LINENO) && gimple_has_location (gs))
    dump_location (buffer, gimple_location (gs));

  if (flags & TDF_EH)
    {
      /* Positive numbers are landing pads, negative ones must-not-throw
	 regions.  */
      int lp_nr = lookup_stmt_eh_lp (gs);
      if (lp_nr > 0)
	pp_printf (buffer, "[LP %d] ", lp_nr);
      else if (lp_nr < 0)
	pp_printf (buffer, "[MNT %d] ", -lp_nr);
    }

  if ((flags & (TDF_VOPS|TDF_MEMSYMS))
      && gimple_has_mem_ops (gs))
    dump_gimple_mem_ops (buffer, gs, spc, flags);

  if (gimple_has_lhs (gs)
      && (flags & TDF_ALIAS))
    dump_ssaname_info (buffer, gimple_get_lhs (gs), spc);

  /* Dispatch on the statement code; each case downcasts GS to the
     corresponding gimple subclass.  */
  switch (gimple_code (gs))
    {
    case GIMPLE_ASM:
      dump_gimple_asm (buffer, as_a <gasm *> (gs), spc, flags);
      break;

    case GIMPLE_ASSIGN:
      dump_gimple_assign (buffer, as_a <gassign *> (gs), spc, flags);
      break;

    case GIMPLE_BIND:
      dump_gimple_bind (buffer, as_a <gbind *> (gs), spc, flags);
      break;

    case GIMPLE_CALL:
      dump_gimple_call (buffer, as_a <gcall *> (gs), spc, flags);
      break;

    case GIMPLE_COND:
      dump_gimple_cond (buffer, as_a <gcond *> (gs), spc, flags);
      break;

    case GIMPLE_LABEL:
      dump_gimple_label (buffer, as_a <glabel *> (gs), spc, flags);
      break;

    case GIMPLE_GOTO:
      dump_gimple_goto (buffer, as_a <ggoto *> (gs), spc, flags);
      break;

    case GIMPLE_NOP:
      pp_string (buffer, "GIMPLE_NOP");
      break;

    case GIMPLE_RETURN:
      dump_gimple_return (buffer, as_a <greturn *> (gs), spc, flags);
      break;

    case GIMPLE_SWITCH:
      dump_gimple_switch (buffer, as_a <gswitch *> (gs), spc, flags);
      break;

    case GIMPLE_TRY:
      dump_gimple_try (buffer, as_a <gtry *> (gs), spc, flags);
      break;

    case GIMPLE_PHI:
      dump_gimple_phi (buffer, as_a <gphi *> (gs), spc, false, flags);
      break;

    case GIMPLE_OMP_PARALLEL:
      dump_gimple_omp_parallel (buffer, as_a <gomp_parallel *> (gs), spc,
				flags);
      break;

    case GIMPLE_OMP_TASK:
      dump_gimple_omp_task (buffer, as_a <gomp_task *> (gs), spc, flags);
      break;

    case GIMPLE_OMP_ATOMIC_LOAD:
      dump_gimple_omp_atomic_load (buffer, as_a <gomp_atomic_load *> (gs),
				   spc, flags);
      break;

    case GIMPLE_OMP_ATOMIC_STORE:
      dump_gimple_omp_atomic_store (buffer,
				    as_a <gomp_atomic_store *> (gs),
				    spc, flags);
      break;

    case GIMPLE_OMP_FOR:
      dump_gimple_omp_for (buffer, as_a <gomp_for *> (gs), spc, flags);
      break;

    case GIMPLE_OMP_CONTINUE:
      dump_gimple_omp_continue (buffer, as_a <gomp_continue *> (gs), spc,
				flags);
      break;

    case GIMPLE_OMP_SINGLE:
      dump_gimple_omp_single (buffer, as_a <gomp_single *> (gs), spc,
			      flags);
      break;

    case GIMPLE_OMP_TARGET:
      dump_gimple_omp_target (buffer, as_a <gomp_target *> (gs), spc,
			      flags);
      break;

    case GIMPLE_OMP_TEAMS:
      dump_gimple_omp_teams (buffer, as_a <gomp_teams *> (gs), spc,
			     flags);
      break;

    case GIMPLE_OMP_RETURN:
      dump_gimple_omp_return (buffer, gs, spc, flags);
      break;

    case GIMPLE_OMP_SECTIONS:
      dump_gimple_omp_sections (buffer, as_a <gomp_sections *> (gs),
				spc, flags);
      break;

    case GIMPLE_OMP_SECTIONS_SWITCH:
      pp_string (buffer, "GIMPLE_SECTIONS_SWITCH");
      break;

    /* These four block constructs share a single clause-less dumper.  */
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_TASKGROUP:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_GRID_BODY:
      dump_gimple_omp_block (buffer, gs, spc, flags);
      break;

    case GIMPLE_OMP_ORDERED:
      dump_gimple_omp_ordered (buffer, as_a <gomp_ordered *> (gs), spc,
			       flags);
      break;

    case GIMPLE_OMP_CRITICAL:
      dump_gimple_omp_critical (buffer, as_a <gomp_critical *> (gs), spc,
				flags);
      break;

    case GIMPLE_CATCH:
      dump_gimple_catch (buffer, as_a <gcatch *> (gs), spc, flags);
      break;

    case GIMPLE_EH_FILTER:
      dump_gimple_eh_filter (buffer, as_a <geh_filter *> (gs), spc, flags);
      break;

    case GIMPLE_EH_MUST_NOT_THROW:
      dump_gimple_eh_must_not_throw (buffer,
				     as_a <geh_mnt *> (gs),
				     spc, flags);
      break;

    case GIMPLE_EH_ELSE:
      dump_gimple_eh_else (buffer, as_a <geh_else *> (gs), spc, flags);
      break;

    case GIMPLE_RESX:
      dump_gimple_resx (buffer, as_a <gresx *> (gs), spc, flags);
      break;

    case GIMPLE_EH_DISPATCH:
      dump_gimple_eh_dispatch (buffer, as_a <geh_dispatch *> (gs), spc,
			       flags);
      break;

    case GIMPLE_DEBUG:
      dump_gimple_debug (buffer, as_a <gdebug *> (gs), spc, flags);
      break;

    case GIMPLE_PREDICT:
      /* Branch predictions are printed inline as a comment.  */
      pp_string (buffer, "// predicted ");
      if (gimple_predict_outcome (gs))
	pp_string (buffer, "likely by ");
      else
	pp_string (buffer, "unlikely by ");
      pp_string (buffer, predictor_name (gimple_predict_predictor (gs)));
      pp_string (buffer, " predictor.");
      break;

    case GIMPLE_TRANSACTION:
      dump_gimple_transaction (buffer, as_a <gtransaction *> (gs), spc,
			       flags);
      break;

    default:
      GIMPLE_NIY;
    }
}
/* Dumps header of basic block BB to OUTF indented by INDENT
   spaces and details described by flags.  With TDF_BLOCKS only the
   source-line summary (if TDF_LINENO) is printed, since the block
   structure is dumped elsewhere; otherwise a "<bb N>" (or "bb_N:" for
   TDF_GIMPLE) label is emitted.  */

static void
dump_gimple_bb_header (FILE *outf, basic_block bb, int indent,
		       dump_flags_t flags)
{
  if (flags & TDF_BLOCKS)
    {
      if (flags & TDF_LINENO)
	{
	  gimple_stmt_iterator gsi;

	  fputs (";; ", outf);

	  /* Report the line of the first real (non-debug) statement
	     that has a known location.  */
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    if (!is_gimple_debug (gsi_stmt (gsi))
		&& get_lineno (gsi_stmt (gsi)) != UNKNOWN_LOCATION)
	      {
		fprintf (outf, "%*sstarting at line %d",
			 indent, "", get_lineno (gsi_stmt (gsi)));
		break;
	      }
	  if (bb->discriminator)
	    fprintf (outf, ", discriminator %i", bb->discriminator);
	  fputc ('\n', outf);
	}
    }
  else
    {
      if (flags & TDF_GIMPLE)
	fprintf (outf, "%*sbb_%d:\n", indent, "", bb->index);
      else
	fprintf (outf, "%*s<bb %d> %s:\n",
		 indent, "", bb->index, dump_profile (bb->count));
    }
}
/* Dump the end of basic block BB to OUTF indented by INDENT spaces.
   There is currently no GIMPLE-specific basic block footer, so this is
   deliberately a no-op; the signature is kept for symmetry with
   dump_gimple_bb_header.  */

static void
dump_gimple_bb_footer (FILE *outf ATTRIBUTE_UNUSED,
		       basic_block bb ATTRIBUTE_UNUSED,
		       int indent ATTRIBUTE_UNUSED,
		       dump_flags_t flags ATTRIBUTE_UNUSED)
{
}
/* Dump the PHI nodes of basic block BB to BUFFER with details described
   by FLAGS and indented by INDENT spaces.  Virtual PHIs are skipped
   unless TDF_VOPS is requested.  */

static void
dump_phi_nodes (pretty_printer *buffer, basic_block bb, int indent,
		dump_flags_t flags)
{
  for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi);
       gsi_next (&gpi))
    {
      gphi *phi_stmt = gpi.phi ();
      if (virtual_operand_p (gimple_phi_result (phi_stmt))
	  && !(flags & TDF_VOPS))
	continue;
      INDENT (indent);
      /* Outside of TDF_GIMPLE, PHIs are printed as "# ..." comments.  */
      dump_gimple_phi (buffer, phi_stmt, indent,
		       !(flags & TDF_GIMPLE), flags);
      pp_newline (buffer);
    }
}
/* Print to BUFFER the jump along edge E that the CFG represents only
   implicitly.  TDF_GIMPLE uses the parseable "goto bb_N;" spelling;
   otherwise "goto <bb N>;" is printed with the edge probability.  */

static void
pp_cfg_jump (pretty_printer *buffer, edge e, dump_flags_t flags)
{
  int dest_index = e->dest->index;

  if (flags & TDF_GIMPLE)
    {
      pp_string (buffer, "goto bb_");
      pp_decimal_int (buffer, dest_index);
      pp_semicolon (buffer);
      return;
    }

  pp_string (buffer, "goto <bb ");
  pp_decimal_int (buffer, dest_index);
  pp_greater (buffer);
  pp_semicolon (buffer);
  dump_edge_probability (buffer, e);
}
/* Dump edges represented implicitly in basic block BB to BUFFER, indented
   by INDENT spaces, with details given by FLAGS.  This covers the two
   outgoing edges of a GIMPLE_COND and any fallthru edge that does not
   go to the next block in layout order.  */

static void
dump_implicit_edges (pretty_printer *buffer, basic_block bb, int indent,
		     dump_flags_t flags)
{
  edge e;
  gimple *stmt;

  stmt = last_stmt (bb);

  if (stmt && gimple_code (stmt) == GIMPLE_COND)
    {
      edge true_edge, false_edge;

      /* When we are emitting the code or changing CFG, it is possible that
	 the edges are not yet created.  When we are using debug_bb in such
	 a situation, we do not want it to crash.  */
      if (EDGE_COUNT (bb->succs) != 2)
	return;
      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Print "goto <true>" / "else goto <false>" after the condition.  */
      INDENT (indent + 2);
      pp_cfg_jump (buffer, true_edge, flags);
      newline_and_indent (buffer, indent);
      pp_string (buffer, "else");
      newline_and_indent (buffer, indent + 2);
      pp_cfg_jump (buffer, false_edge, flags);
      pp_newline (buffer);
      return;
    }

  /* If there is a fallthru edge, we may need to add an artificial
     goto to the dump.  */
  e = find_fallthru_edge (bb->succs);
  if (e && e->dest != bb->next_bb)
    {
      INDENT (indent);

      if ((flags & TDF_LINENO)
	  && e->goto_locus != UNKNOWN_LOCATION)
	dump_location (buffer, e->goto_locus);

      pp_cfg_jump (buffer, e, flags);
      pp_newline (buffer);
    }
}
/* Dumps basic block BB to buffer BUFFER with details described by FLAGS and
   indented by INDENT spaces: first the PHI nodes, then each statement
   (with its value-profile histograms), then the implicit edges.  */

static void
gimple_dump_bb_buff (pretty_printer *buffer, basic_block bb, int indent,
		     dump_flags_t flags)
{
  gimple_stmt_iterator gsi;
  gimple *stmt;
  /* Labels are printed two columns to the left of the statements.  */
  int label_indent = indent - 2;

  if (label_indent < 0)
    label_indent = 0;

  dump_phi_nodes (buffer, bb, indent, flags);

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      int curr_indent;

      stmt = gsi_stmt (gsi);

      curr_indent = gimple_code (stmt) == GIMPLE_LABEL ? label_indent : indent;

      INDENT (curr_indent);
      pp_gimple_stmt_1 (buffer, stmt, curr_indent, flags);
      pp_newline_and_flush (buffer);
      /* Histogram dumping writes to the raw stream, so the pretty
	 printer must be flushed first (see pp_newline_and_flush).  */
      gcc_checking_assert (DECL_STRUCT_FUNCTION (current_function_decl));
      dump_histograms_for_stmt (DECL_STRUCT_FUNCTION (current_function_decl),
				pp_buffer (buffer)->stream, stmt);
    }

  dump_implicit_edges (buffer, bb, indent, flags);
  pp_flush (buffer);
}
/* Dumps basic block BB to FILE with details described by FLAGS and
   indented by INDENT spaces.  */

void
gimple_dump_bb (FILE *file, basic_block bb, int indent, dump_flags_t flags)
{
  dump_gimple_bb_header (file, bb, indent, flags);
  /* The first NUM_FIXED_BLOCKS block indices are reserved (entry/exit);
     only real blocks have a body worth printing.  */
  if (bb->index >= NUM_FIXED_BLOCKS)
    {
      pretty_printer pp;
      pp_needs_newline (&pp) = true;
      pp.buffer->stream = file;
      gimple_dump_bb_buff (&pp, bb, indent, flags);
    }
  dump_gimple_bb_footer (file, bb, indent, flags);
}
/* Dumps basic block BB to pretty-printer PP with default dump flags and
   no indentation, for use as a label of a DOT graph record-node.
   ??? Should just use gimple_dump_bb_buff here, except that value profiling
   histogram dumping doesn't know about pretty-printers. */

void
gimple_dump_bb_for_graph (pretty_printer *pp, basic_block bb)
{
  pp_printf (pp, "<bb %d>:\n", bb->index);
  /* Escape the text emitted so far so it is valid inside a DOT record
     label; done again after each statement below.  */
  pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);

  /* PHI nodes first.  Virtual-operand PHIs are shown only when TDF_VOPS
     is set in the global dump_flags.  Each one becomes a '|'-separated
     field of the record label, prefixed with "# ".  */
  for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      if (!virtual_operand_p (gimple_phi_result (phi))
	  || (dump_flags & TDF_VOPS))
	{
	  pp_bar (pp);
	  pp_write_text_to_stream (pp);
	  pp_string (pp, "# ");
	  pp_gimple_stmt_1 (pp, phi, 0, dump_flags);
	  pp_newline (pp);
	  pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);
	}
    }

  /* Ordinary statements, one record field each.  */
  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      pp_bar (pp);
      pp_write_text_to_stream (pp);
      pp_gimple_stmt_1 (pp, stmt, 0, dump_flags);
      pp_newline (pp);
      pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);
    }

  /* Implicit gotos for the block's successors, escaped like the rest.  */
  dump_implicit_edges (pp, bb, 0, dump_flags);
  pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);
}
/* Handle the %G format for TEXT. Same as %K in handle_K_format in
   tree-pretty-print.c but with a Gimple call statement as an argument. */

void
percent_G_format (text_info *text)
{
  gcall *stmt = va_arg (*text->args_ptr, gcall*);

  /* Build a call expression from the Gimple call statement and
     pass it to the K formatter that knows how to format it.
     The "+ 3" reserves CALL_EXPR's fixed operand slots (presumably the
     operand count, callee, and static chain) in addition to the call's
     actual arguments -- see build_vl_exp for the exact layout.  */
  tree exp = build_vl_exp (CALL_EXPR, gimple_call_num_args (stmt) + 3);
  CALL_EXPR_FN (exp) = gimple_call_fn (stmt);
  TREE_TYPE (exp) = gimple_call_return_type (stmt);
  CALL_EXPR_STATIC_CHAIN (exp) = gimple_call_chain (stmt);
  SET_EXPR_LOCATION (exp, gimple_location (stmt));
  percent_K_format (text, exp);
}
|
pr64421.c | /* PR middle-end/64421 */
/* { dg-require-effective-target vect_simd_clones } */
/* { dg-additional-options "-fopenmp-simd" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
#include "tree-vect.h"
#pragma omp declare simd linear (y) notinbranch
int foo (int x, int y) __asm ("bar");
#pragma omp declare simd linear (y) notinbranch
/* Scalar fallback body for the SIMD clone declared above: the element-wise
   sum of X and the linear argument Y.  */
int
foo (int x, int y)
{
  return y + x;
}
int a[1024] = { 1, 2 };
int
main ()
{
  int i;
  /* Runtime guard from tree-vect.h: presumably skips the test early when
     the host CPU lacks the vector ISA the test was compiled for.  */
  check_vect ();
#pragma omp simd
  for (i = 0; i < 1024; i++)
    a[i] = foo (a[i], i);
  /* a[] starts as { 1, 2, 0, 0, ... }; after a[i] += i we expect
     a[0] == 1, a[1] == 3, and a[i] == i for every i >= 2.  */
  if (a[0] != 1 || a[1] != 3)
    abort ();
  for (i = 2; i < 1024; i++)
    if (a[i] != i)
      abort ();
  return 0;
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/LoopHint.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class VersionTuple;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
unsigned short ParenCount, BracketCount, BraceCount;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
mutable IdentifierInfo *Ident_instancetype;
/// \brief Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// \brief Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// \brief Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// \brief Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// \brief Identifier for "message".
IdentifierInfo *Ident_message;
/// \brief Identifier for "strict".
IdentifierInfo *Ident_strict;
/// \brief Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// C++0x contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_override;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// \brief When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// \brief RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
unsigned getDepth() const { return Depth; }
};
/// Factory object for creating AttributeList objects.
AttributeFactory AttrFactory;
/// \brief Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// \brief Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
bool SkipFunctionBodies;
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion
/// and balanced tokens must be handled using the specific consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.getKind() == tok::l_paren || Tok.getKind() == tok::r_paren;
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.getKind() == tok::l_square || Tok.getKind() == tok::r_square;
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.getKind() == tok::l_brace || Tok.getKind() == tok::r_brace;
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion);
}
/// \brief Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
  /// \brief Return the current token to the token stream and make the given
  /// token the current token.
  void UnconsumeToken(Token &Consumed) {
    // Save the current lookahead, push Consumed back into the preprocessor
    // and lex it into Tok, then queue the saved lookahead so it is produced
    // again on the next lex.
    Token Next = Tok;
    PP.EnterToken(Consumed);
    PP.Lex(Tok);
    PP.EnterToken(Next);
  }
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
return ConsumeToken();
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount)
--ParenCount; // Don't let unbalanced )'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount)
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount)
--BraceCount; // Don't let unbalanced }'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
///\ brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
  /// \brief Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    // Record that completion was reached so the consumer fires exactly once.
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }
/// \brief Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// \brief Initialize all pragma handlers.
void initializePragmaHandlers();
/// \brief Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// \brief Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// \brief Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// \brief Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// \brief Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// \brief Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// \brief Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// \brief Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// \brief Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// \brief Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// \brief Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// \brief Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// \brief Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// \brief Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// \brief Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
/// \brief Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// \brief Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken(bool EnteringContext = false,
bool NeedType = false);
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(bool EnteringContext,
bool NeedType,
CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind
TryAnnotateName(bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC1);
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// \brief Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
  class TentativeParsingAction {
    Parser &P;
    // Parser state snapshotted at construction and restored by Revert().
    Token PrevTok;
    size_t PrevTentativelyDeclaredIdentifierCount;
    unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
    // True until Commit() or Revert() runs; the destructor asserts that
    // exactly one of them was called.
    bool isActive;
  public:
    explicit TentativeParsingAction(Parser& p) : P(p) {
      // Snapshot the lookahead token, the tentatively-declared identifier
      // count and the delimiter counts, then ask the preprocessor to start
      // caching tokens so they can be replayed on Revert().
      PrevTok = P.Tok;
      PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
      PrevParenCount = P.ParenCount;
      PrevBracketCount = P.BracketCount;
      PrevBraceCount = P.BraceCount;
      P.PP.EnableBacktrackAtThisPos();
      isActive = true;
    }
    void Commit() {
      assert(isActive && "Parsing action was finished!");
      // Keep the consumed tokens; drop identifiers tentatively declared
      // during this action from the tracking list.
      P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
      P.PP.CommitBacktrackedTokens();
      isActive = false;
    }
    void Revert() {
      assert(isActive && "Parsing action was finished!");
      // Rewind the preprocessor to the marked position and restore every
      // piece of parser state captured in the constructor.
      P.PP.Backtrack();
      P.Tok = PrevTok;
      P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
      P.ParenCount = PrevParenCount;
      P.BracketCount = PrevBracketCount;
      P.BraceCount = PrevBraceCount;
      isActive = false;
    }
    ~TentativeParsingAction() {
      assert(!isActive && "Forgot to call Commit or Revert!");
    }
  };
class UnannotatedTentativeParsingAction;
  /// ObjCDeclContextSwitch - An object used to switch context from
  /// an objective-c decl context to its enclosing decl context and
  /// back.
  class ObjCDeclContextSwitch {
    Parser &P;
    Decl *DC;  // The ObjC container we stepped out of; null if none.
    // Marks the parser as "inside an ObjC container parsing C-like decls"
    // while this object lives; restored automatically on destruction.
    SaveAndRestore<bool> WithinObjCContainer;
  public:
    explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
      // Temporarily leave the ObjC container context in Sema, if any.
      if (DC)
        P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
    }
    ~ObjCDeclContextSwitch() {
      // Re-enter the container context we left in the constructor.
      if (DC)
        P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
    }
  };
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// \brief The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// \brief The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// \brief Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
  class ParseScope {
    Parser *Self;  // Null once the scope has been exited (or never entered).
    ParseScope(const ParseScope &) = delete;
    void operator=(const ParseScope &) = delete;
  public:
    // ParseScope - Construct a new object to manage a scope in the
    // parser Self where the new Scope is created with the flags
    // ScopeFlags, but only when we aren't about to enter a compound statement.
    ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
               bool BeforeCompoundStmt = false)
      : Self(Self) {
      if (EnteredScope && !BeforeCompoundStmt)
        Self->EnterScope(ScopeFlags);
      else {
        // No scope is entered; the compound statement will make its own.
        // Still bump the MS mangling number so numbering stays consistent.
        if (BeforeCompoundStmt)
          Self->incrementMSManglingNumber();
        this->Self = nullptr;
      }
    }
    // Exit - Exit the scope associated with this object now, rather
    // than waiting until the object is destroyed.
    void Exit() {
      if (Self) {
        Self->ExitScope();
        Self = nullptr;
      }
    }
    ~ParseScope() {
      Exit();
    }
  };
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// \brief RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// \brief Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// \brief Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend LLVM_CONSTEXPR SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  // One hook per category of late-parsed entity; subclasses override the
  // hook(s) matching the tokens they cached (definitions are out of line).
  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;

private:
  Parser *Self;        // The parser used to re-lex the cached tokens.
  ParsingClass *Class; // The nested class whose members are parsed recursively.
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;                 // The parser used to re-lex the cached tokens.
  CachedTokens Toks;            // The cached argument tokens of the attribute.
  IdentifierInfo &AttrName;     // Name of the attribute.
  SourceLocation AttrNameLoc;   // Location of the attribute name.
  SmallVector<Decl*, 2> Decls;  // Declarations registered via addDecl().

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
    : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  /// Record a declaration this attribute appertains to.
  void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  /// \brief Whether these attributes should be parsed shortly after
  /// creation rather than at the end of the enclosing class.
  /// (const-qualified: this accessor does not modify the list.)
  bool parseSoon() const { return ParseSoon; }

private:
  bool ParseSoon;  // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;      // The parser used to re-lex the cached tokens.
  Decl *D;           // The method (or template) whose body was cached.
  CachedTokens Toks; // The tokens of the delayed method body.

  /// \brief Whether this member function had an associated template
  /// scope. When true, D is a template declaration;
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  explicit LexedMethod(Parser* P, Decl *MD)
    : Self(P), D(MD), TemplateScope(false) {}

  void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  /// \p Toks may be null for parameters without a default argument.
  explicit LateParsedDefaultArgument(Decl *P,
                                     CachedTokens *Toks = nullptr)
    : Param(P), Toks(Toks) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  CachedTokens *Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
    : Self(P), Method(M), TemplateScope(false),
      ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser* Self;  // The parser used to re-lex the cached tokens.

  /// Method - The method declaration.
  Decl *Method;

  /// \brief Whether this member function had an associated template
  /// scope. When true, D is a template declaration;
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// \brief The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
    : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;  // The parser used to re-lex the cached tokens.

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// \brief Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
    : TopLevelClass(TopLevelClass), TemplateScope(false),
      IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

  /// \brief Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// \brief Whether this class had an associated template
  /// scope. When true, TagOrTemplate is a template declaration;
  /// otherwise, it is a tag declaration.
  bool TemplateScope : 1;

  /// \brief Whether this class is an __interface.
  bool IsInterface : 1;

  /// \brief The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// \brief The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// \brief Return the innermost class currently being parsed.
/// Asserts that at least one class is on ClassStack.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}
/// \brief RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;                      // The parser whose class stack we manage.
  bool Popped;                    // Whether Pop() has already been called.
  Sema::ParsingClassState State;  // Sema state to restore when popping.

public:
  /// Push a new class onto the parser's class stack for the duration
  /// of this object's lifetime.
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
    : P(P), Popped(false),
      State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// \brief Pop this class off the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  // Pops the class automatically unless Pop() was called explicitly.
  ~ParsingClassDefinition() {
    if (!Popped)
      P.PopParsingClass(State);
  }
};
/// \brief Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  /// Default: not inside any template construct.
  ParsedTemplateInfo()
    : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

  /// A template declaration or, when \p isSpecialization is true, an
  /// explicit specialization, with the given parameter lists.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
    : Kind(isSpecialization? ExplicitSpecialization : Template),
      TemplateParams(TemplateParams),
      LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  /// An explicit instantiation, located by its (optional) 'extern' and
  /// 'template' keywords.
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
    : Kind(ExplicitInstantiation), TemplateParams(nullptr),
      ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
      LastParameterListWasEmpty(false){ }

  /// \brief The kind of template we are parsing.
  enum {
    /// \brief We are not parsing a template at all.
    NonTemplate = 0,
    /// \brief We are parsing a template declaration.
    Template,
    /// \brief We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// \brief We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// \brief The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// \brief The location of the 'extern' keyword, if any, for an explicit
  /// instantiation.
  SourceLocation ExternLoc;

  /// \brief The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// \brief Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
/// \brief The kind of cached initializer tokens being late-parsed.
enum CachedInitKind {
  CIK_DefaultArgument,    ///< A default function-argument expression.
  CIK_DefaultInitializer  ///< A default member initializer.
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
AttributeList *AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers& VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// Single-token convenience overload: forwards to the two-token form
/// with \p T1 in both slots.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
/// ParsedAttributes augmented with the source range the attributes cover.
struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
    : ParsedAttributes(factory) {}

  SourceRange Range;  // Source extent of the attribute list.
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives();
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// RAII helper tracking state while parsing an Objective-C @implementation;
/// registers itself as the parser's current implementation on construction.
struct ObjCImplParsingDataRAII {
  Parser &P;          // The owning parser.
  Decl *Dcl;          // The @implementation declaration being parsed.
  bool HasCFunction;  // Whether a C function was seen inside the block.
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
    // Initialize all scalar members in the init list (declaration order)
    // rather than assigning Finished inside the constructor body.
    : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished;  // Set by finish(); queried via isFinished().
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, Declarator::TheContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
void *Info,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
std::function<void()> Completer = nullptr);
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr);
void CheckForLParenAfterColonColon();
//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro,
bool *SkippedInits = nullptr);
bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
ExprResult ParseLambdaExpressionAfterIntroducer(
LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while condition expression.
bool ParseCXXCondition(ExprResult &ExprResult, Decl *&DeclResult,
SourceLocation Loc, bool ConvertToBoolean);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
// Dispatch on the current token: a '{' begins a brace initializer,
// anything else is parsed as an assignment-expression.
ExprResult ParseInitializer() {
  return Tok.is(tok::l_brace) ? ParseBraceInitializer()
                              : ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
bool AllowOpenMPStandalone = false);
// NOTE(review): the enum name misspells "Constructs"; it is used by several
// member signatures below, so renaming would touch every caller.
enum AllowedContsructsKind {
  /// \brief Allow any declarations, statements, OpenMP directives.
  ACK_Any,
  /// \brief Allow only statements and non-standalone OpenMP directives.
  ACK_StatementsOpenMPNonStandalone,
  /// \brief Allow statements and all executable OpenMP directives.
  ACK_StatementsOpenMPAnyExecutable
};
StmtResult
ParseStatementOrDeclaration(StmtVector &Stmts, AllowedContsructsKind Allowed,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
AllowedContsructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement();
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs);
StmtResult ParseCaseStatement(bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement();
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(ExprResult &ExprResult,
Decl *&DeclResult,
SourceLocation Loc,
bool ConvertToBoolean);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
AllowedContsructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// \brief Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// \brief Parse the block; this code is always used.
IEB_Parse,
/// \brief Skip the block entirely; this code is never used.
IEB_Skip,
/// \brief Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// \brief Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// \brief The location of the initial keyword.
  SourceLocation KeywordLoc;

  /// \brief Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;

  /// \brief Nested-name-specifier preceding the name.
  CXXScopeSpec SS;

  /// \brief The name we're looking for.
  UnqualifiedId Name;

  /// \brief How this __if_exists or __if_not_exists block should be
  /// handled: parsed, skipped, or treated as dependent.
  IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
AccessSpecifier& CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Determine whether the given declaration-specifier context permits only a
/// type-specifier (or trailing-type-specifier), as opposed to a full
/// decl-specifier-seq.
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  case DSC_type_specifier:
  case DSC_trailing:
  case DSC_alias_declaration:
  case DSC_template_type_arg:
    // Only a type may appear in these contexts.
    return true;

  case DSC_normal:
  case DSC_class:
  case DSC_top_level:
  case DSC_objc_method_result:
  case DSC_condition:
    // Full declaration specifiers are allowed here.
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  // Location of the ':' separating declarator and range; only set when a
  // for-range-declaration was actually parsed.
  SourceLocation ColonLoc;
  // The range expression following the colon.
  ExprResult RangeExpr;

  // True if a for-range-declaration was parsed (i.e. ColonLoc was set).
  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
DeclGroupPtrTy ParseDeclaration(unsigned Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(unsigned Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(unsigned Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, unsigned Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// \brief When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext getDeclSpecContextFromDeclaratorContext(unsigned Context);
void ParseDeclarationSpecifiers(DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
Declarator::TheContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// \brief Return true if we know that we are definitely looking at a
/// decl-specifier, and it isn't part of an expression such as a
/// function-style cast. Return false if it's not a decl-specifier, or
/// we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXDeclarationSpecifier() == TPResult::True;
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXDeclarationStatement();
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
}
/// \brief Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// \brief Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// \brief Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified);
/// \brief Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
  TypeIdInParens,          // Type-id appearing inside parens (see isTypeIdInParens).
  TypeIdUnambiguous,       // Type-id not in parens (see isTypeIdUnambiguously).
  TypeIdAsTemplateArgument // Type-id appearing as a template argument.
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    // In C there is no type-id/expression ambiguity to resolve.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Convenience overload that discards the ambiguity result.
bool isTypeIdInParens() {
  bool Ambiguous;
  return isTypeIdInParens(Ambiguous);
}
/// \brief Checks whether the current tokens form a type-id or an expression.
/// Similar to isTypeIdInParens, but does not assume the type-id is
/// parenthesized.
bool isTypeIdUnambiguously() {
  if (!getLangOpts().CPlusPlus)
    return isTypeSpecifierQualifier();
  bool IsAmbiguous;
  return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
/// isCXXConditionDeclaration - Disambiguates between a declaration or an
/// expression for a condition of a if/switch/while/for statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXConditionDeclaration();
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Convenience overload of isCXXTypeId that ignores the ambiguity result.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool Ambiguous;
  return isCXXTypeId(Context, Ambiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
  True,      // Disambiguated: it is the construct in question.
  False,     // Disambiguated: it is not the construct in question.
  Ambiguous, // Still ambiguous; more tentative parsing is needed.
  Error      // A parsing error was encountered.
};
/// \brief Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *HasMissingTypename = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// \brief Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
Declarator::TheContext Context
= Declarator::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
// Check for the start of a C++11 attribute-specifier-seq in a context where
// an attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // '[[' in C++11 mode begins an attribute-specifier; diagnose it here.
  if (getLangOpts().CPlusPlus11 && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
// If the parser is positioned at a C++11 attribute ('[[' or 'alignas'),
// diagnose it as misplaced and suggest CorrectLocation instead.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!getLangOpts().CPlusPlus11)
    return;
  bool AtCXX11Attribute =
      (Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
      Tok.is(tok::kw_alignas);
  if (AtCXX11Attribute)
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void handleDeclspecAlignBeforeClassKey(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// Diagnose and discard any attributes parsed where attributes are not
// permitted. No-op when no attributes were seen (invalid range).
void ProhibitAttributes(ParsedAttributesWithRange &attrs) {
  if (attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(attrs);
    attrs.clear();
  }
}
void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs);
// Forbid C++11 attributes that appear on certain syntactic
// locations which the standard permits but we don't support yet,
// for example, attributes appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &attrs);
/// \brief Skip C++11 attributes and return the end location of the last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// \brief Diagnose and skip C++11 attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// \brief Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
// If the current token starts a GNU __attribute__ specifier, parse it and
// attach the result to the declarator D.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (!Tok.is(tok::kw___attribute))
    return;
  ParsedAttributes attrs(AttrFactory);
  SourceLocation endLoc;
  ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
  D.takeAttributes(attrs, endLoc);
}
// If the current token starts a GNU __attribute__ specifier, parse it into
// the given attribute list.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (!Tok.is(tok::kw___attribute))
    return;
  ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax,
Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
// If a C++11 attribute-specifier-seq is present, parse it and attach it to
// the declarator D.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!getLangOpts().CPlusPlus11 || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange attrs(AttrFactory);
  SourceLocation endLoc;
  ParseCXX11Attributes(attrs, &endLoc);
  D.takeAttributes(attrs, endLoc);
}
// If a C++11 attribute-specifier-seq is present, parse it and merge the
// result into the caller's attribute list.
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().CPlusPlus11 || !isCXX11AttributeSpecifier())
    return;
  // Parse into a ranged set first; the caller's list has no range.
  ParsedAttributesWithRange attrsWithRange(AttrFactory);
  ParseCXX11Attributes(attrsWithRange, endLoc);
  attrs.takeAllFrom(attrsWithRange);
}
// If a C++11 attribute-specifier-seq is present, parse it into the given
// ranged attribute list. OuterMightBeMessageSend is forwarded to the
// disambiguation check.
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (!getLangOpts().CPlusPlus11)
    return;
  if (isCXX11AttributeSpecifier(/*Disambiguate=*/false,
                                OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// \brief Parses a C++-style attribute argument list. Returns true if this
/// results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
// Parse Microsoft-style '[...]' attributes when the Microsoft extension is
// enabled and the current token starts one.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().MicrosoftExt || !Tok.is(tok::l_square))
    return;
  ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
// Parse a __declspec specifier when the keyword is enabled and present.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// \brief Parses an opencl_unroll_hint attribute if the language is OpenCL
/// v2.0 or higher; otherwise does nothing.
/// \return false if an error happens, true otherwise (including the
/// non-OpenCL case, where nothing is parsed).
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (!getLangOpts().OpenCL)
    return true;
  return ParseOpenCLUnrollHintAttribute(Attrs);
}
/// \brief Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
// Determine which C++11 virt-specifier, if any, the current token denotes.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;  // Scope specifier naming the declarator's scope.
  bool EnteredScope; // True once Sema successfully entered the scope.
  bool CreatedScope; // True once a parser Scope object was pushed.
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");

    // Push the parser scope first; the destructor pops it unconditionally
    // once CreatedScope is set, even if Sema fails below.
    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.

    // Only record success so the destructor doesn't exit a Sema scope
    // that was never entered.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }

  ~DeclaratorScopeObj() {
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
// Bitmask describing which attribute syntaxes are parsed (rather than
// diagnosed) in a given position.
enum AttrRequirements {
  AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
  AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
  AR_GNUAttributesParsed = 1 << 1,      ///< GNU __attribute__ is parsed.
  AR_CXX11AttributesParsed = 1 << 2,    ///< C++11 [[...]] is parsed.
  AR_DeclspecAttributesParsed = 1 << 3, ///< __declspec(...) is parsed.
  AR_AllAttributesParsed = AR_GNUAttributesParsed |
                           AR_CXX11AttributesParsed |
                           AR_DeclspecAttributesParsed,
  AR_VendorAttributesParsed = AR_GNUAttributesParsed |
                              AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(DeclSpec &DS,
unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true,
bool IdentifierRequired = false);
void ParseDirectDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found, as determined by
/// isCXX11AttributeSpecifier.
enum CXX11AttributeKind {
  /// This is not an attribute specifier.
  CAK_NotAttributeSpecifier,
  /// This should be treated as an attribute-specifier.
  CAK_AttributeSpecifier,
  /// The next tokens are '[[', but this is not an attribute-specifier. This
  /// is ill-formed by C++11 [dcl.attr.grammar]p6.
  CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(unsigned Context, SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
void ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc,
std::vector<IdentifierInfo*>& Ident,
std::vector<SourceLocation>& NamespaceLoc,
unsigned int index, SourceLocation& InlineLoc,
ParsedAttributes& attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, unsigned Context);
Decl *ParseUsingDirectiveOrDeclaration(unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
Decl **OwnedType = nullptr);
Decl *ParseUsingDirective(unsigned Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
Decl *ParseUsingDeclaration(unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, AttributeList *Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// \brief Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// \brief Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// \brief Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// \brief Parses declarative or executable directive.
///
/// \param Allowed ACK_Any, if any directives are allowed,
/// ACK_StatementsOpenMPAnyExecutable - if any executable directives are
/// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone
/// executable directives are allowed.
///
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(AllowedContsructsKind Allowed);
/// \brief Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// \brief Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind);
/// \brief Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind);
/// \brief Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind);
/// \brief Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind);
/// \brief Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
  // Trailing expression, if any (semantics depend on the clause kind —
  // TODO confirm against ParseOpenMPVarList).
  Expr *TailExpr = nullptr;
  // Location of the ':' within the clause, if present.
  SourceLocation ColonLoc;
  // Scope specifier of the reduction identifier, for 'reduction' clauses.
  CXXScopeSpec ReductionIdScopeSpec;
  // Name of the reduction identifier, for 'reduction' clauses.
  DeclarationNameInfo ReductionId;
  // Dependency kind for 'depend' clauses.
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
  // Linear-clause kind; defaults to OMPC_LINEAR_val.
  OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;
  // Map-type modifier and map type for 'map' clauses.
  OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
  // True when the map type was not written explicitly in the source.
  bool IsMapTypeImplicit = false;
  // Location of the depend/linear/map qualifier.
  SourceLocation DepLinMapLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
ParsedType ObjectType,
SourceLocation& TemplateKWLoc,
UnqualifiedId &Result);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(unsigned Context,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none,
AttributeList *AccessAttrs = nullptr);
Decl *ParseTemplateDeclarationOrSpecialization(unsigned Context,
SourceLocation &DeclEnd,
AccessSpecifier AS,
AttributeList *AccessAttrs);
Decl *ParseSingleDeclarationAfterTemplate(
unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams,
SourceLocation &DeclEnd,
AccessSpecifier AS=AS_none,
AttributeList *AccessAttrs = nullptr);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<Decl*> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<Decl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
Decl *ParseTemplateParameter(unsigned Depth, unsigned Position);
Decl *ParseTypeParameter(unsigned Depth, unsigned Position);
Decl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
Decl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(TemplateTy Template,
SourceLocation TemplateNameLoc,
const CXXScopeSpec &SS,
bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType();
bool IsTemplateArgumentList(unsigned Skip = 0);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(unsigned Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Arary and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
pr36802-2.c | /* PR middle-end/36802 */
extern void abort (void);
int q;
int
foo (int k)
{
  /* Nested-parallelism regression test (PR middle-end/36802).
     Outer team: 3 threads.  When k == 0 each of the 3 worksharing
     iterations spawns an inner team of 4 threads, and every inner thread
     atomically bumps the shared counter i exactly once.  n counts
     thread-count mismatches; q is only touched on the k != 0 path.
     NOTE(review): omp_set_dynamic/omp_set_nested/omp_get_num_threads are
     used without <omp.h> -- relies on implicit declarations, presumably
     intentional for this compile/run test.  */
  int i = 6, n = 0;
  omp_set_dynamic (0);   /* require exact thread counts below */
  omp_set_nested (1);    /* allow the inner parallel region to fork */
#pragma omp parallel shared (i) num_threads (3)
  {
    int l;
    if (omp_get_num_threads () != 3)
#pragma omp atomic
      n += 1;
    else
#pragma omp for
      for (l = 0; l < 3; l++)
	if (k)
#pragma omp atomic
	  q += i;
	else
#pragma omp parallel shared (i) num_threads (4)
	  {
	    if (omp_get_num_threads () != 4)
#pragma omp atomic
	      n += 1;
#pragma omp critical
	    i += 1;
	  }
  }
  /* 3 outer iterations x 4 inner threads each increment i once.  */
  if (n == 0 && i != 6 + 3 * 4)
    abort ();
  return 0;
}
int
main (void)
{
  /* k == 0 exercises the nested-parallel branch of foo; foo returns 0
     unless the consistency check aborted the process.  */
  return foo (0);
}
|
triplet_kpoint.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* These codes were originally parts of spglib, but only develped */
/* and used for phono3py. Therefore these were moved from spglib to */
/* phono3py. This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stddef.h>
#include <stdlib.h>
#include <mathfunc.h>
#include <kpoint.h>
#include <kgrid.h>
#include <triplet_h/triplet.h>
#include <triplet_h/triplet_kpoint.h>
/* Number of candidate reciprocal-lattice translations examined when
 * folding a q-point back into the Brillouin zone: 5 values per axis. */
#define KPT_NUM_BZ_SEARCH_SPACE 125
/* All 5^3 = 125 G-vector offsets with each component drawn from
 * {0, 1, 2, -2, -1}; every axis cycles in that order, last axis fastest.
 * Index 0 is the null translation {0, 0, 0}. */
static int bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = {
  { 0, 0, 0},
  { 0, 0, 1},
  { 0, 0, 2},
  { 0, 0, -2},
  { 0, 0, -1},
  { 0, 1, 0},
  { 0, 1, 1},
  { 0, 1, 2},
  { 0, 1, -2},
  { 0, 1, -1},
  { 0, 2, 0},
  { 0, 2, 1},
  { 0, 2, 2},
  { 0, 2, -2},
  { 0, 2, -1},
  { 0, -2, 0},
  { 0, -2, 1},
  { 0, -2, 2},
  { 0, -2, -2},
  { 0, -2, -1},
  { 0, -1, 0},
  { 0, -1, 1},
  { 0, -1, 2},
  { 0, -1, -2},
  { 0, -1, -1},
  { 1, 0, 0},
  { 1, 0, 1},
  { 1, 0, 2},
  { 1, 0, -2},
  { 1, 0, -1},
  { 1, 1, 0},
  { 1, 1, 1},
  { 1, 1, 2},
  { 1, 1, -2},
  { 1, 1, -1},
  { 1, 2, 0},
  { 1, 2, 1},
  { 1, 2, 2},
  { 1, 2, -2},
  { 1, 2, -1},
  { 1, -2, 0},
  { 1, -2, 1},
  { 1, -2, 2},
  { 1, -2, -2},
  { 1, -2, -1},
  { 1, -1, 0},
  { 1, -1, 1},
  { 1, -1, 2},
  { 1, -1, -2},
  { 1, -1, -1},
  { 2, 0, 0},
  { 2, 0, 1},
  { 2, 0, 2},
  { 2, 0, -2},
  { 2, 0, -1},
  { 2, 1, 0},
  { 2, 1, 1},
  { 2, 1, 2},
  { 2, 1, -2},
  { 2, 1, -1},
  { 2, 2, 0},
  { 2, 2, 1},
  { 2, 2, 2},
  { 2, 2, -2},
  { 2, 2, -1},
  { 2, -2, 0},
  { 2, -2, 1},
  { 2, -2, 2},
  { 2, -2, -2},
  { 2, -2, -1},
  { 2, -1, 0},
  { 2, -1, 1},
  { 2, -1, 2},
  { 2, -1, -2},
  { 2, -1, -1},
  {-2, 0, 0},
  {-2, 0, 1},
  {-2, 0, 2},
  {-2, 0, -2},
  {-2, 0, -1},
  {-2, 1, 0},
  {-2, 1, 1},
  {-2, 1, 2},
  {-2, 1, -2},
  {-2, 1, -1},
  {-2, 2, 0},
  {-2, 2, 1},
  {-2, 2, 2},
  {-2, 2, -2},
  {-2, 2, -1},
  {-2, -2, 0},
  {-2, -2, 1},
  {-2, -2, 2},
  {-2, -2, -2},
  {-2, -2, -1},
  {-2, -1, 0},
  {-2, -1, 1},
  {-2, -1, 2},
  {-2, -1, -2},
  {-2, -1, -1},
  {-1, 0, 0},
  {-1, 0, 1},
  {-1, 0, 2},
  {-1, 0, -2},
  {-1, 0, -1},
  {-1, 1, 0},
  {-1, 1, 1},
  {-1, 1, 2},
  {-1, 1, -2},
  {-1, 1, -1},
  {-1, 2, 0},
  {-1, 2, 1},
  {-1, 2, 2},
  {-1, 2, -2},
  {-1, 2, -1},
  {-1, -2, 0},
  {-1, -2, 1},
  {-1, -2, 2},
  {-1, -2, -2},
  {-1, -2, -1},
  {-1, -1, 0},
  {-1, -1, 1},
  {-1, -1, 2},
  {-1, -1, -2},
  {-1, -1, -1}
};
static void grid_point_to_address_double(int address_double[3],
const size_t grid_point,
const int mesh[3],
const int is_shift[3]);
static size_t get_ir_triplets_at_q(size_t *map_triplets,
size_t *map_q,
int (*grid_address)[3],
const size_t grid_point,
const int mesh[3],
const MatINT * rot_reciprocal,
const int swappable);
static size_t get_BZ_triplets_at_q(size_t (*triplets)[3],
const size_t grid_point,
TPLCONST int (*bz_grid_address)[3],
const size_t *bz_map,
const size_t *map_triplets,
const size_t num_map_triplets,
const int mesh[3]);
static int get_third_q_of_triplets_at_q(int bz_address[3][3],
const int q_index,
const size_t *bz_map,
const int mesh[3],
const int bzmesh[3]);
static void modulo_i3(int v[3], const int m[3]);
/* Find the irreducible (q', q'') pairs of triplets (q, q', q'') with
 * q + q' + q'' = G at fixed q = grid_point.  Builds the reciprocal point
 * group from `rotations` (optionally with time reversal), delegates to
 * get_ir_triplets_at_q, and frees the temporary group.
 * Fills map_triplets, map_q and grid_address; returns the number of
 * irreducible triplets.  Interface unchanged. */
size_t tpk_get_ir_triplets_at_q(size_t *map_triplets,
                                size_t *map_q,
                                int (*grid_address)[3],
                                const int grid_point,
                                const int mesh[3],
                                const int is_time_reversal,
                                const MatINT * rotations,
                                const int swappable)
{
  /* Fix: was `int num_ir`, silently narrowing the size_t returned by
   * get_ir_triplets_at_q before widening it again on return; could wrap
   * for very dense meshes. */
  size_t num_ir;
  MatINT *rot_reciprocal;

  rot_reciprocal = kpt_get_point_group_reciprocal(rotations, is_time_reversal);
  num_ir = get_ir_triplets_at_q(map_triplets,
                                map_q,
                                grid_address,
                                grid_point,
                                mesh,
                                rot_reciprocal,
                                swappable);
  mat_free_MatINT(rot_reciprocal);
  return num_ir;
}
/* Public wrapper: expand the irreducible triplet representatives computed
 * by tpk_get_ir_triplets_at_q into explicit BZ-grid index triplets.
 * Returns the number of triplets written into `triplets`. */
size_t tpk_get_BZ_triplets_at_q(size_t (*triplets)[3],
                                const size_t grid_point,
                                TPLCONST int (*bz_grid_address)[3],
                                const size_t *bz_map,
                                const size_t *map_triplets,
                                const size_t num_map_triplets,
                                const int mesh[3])
{
  return get_BZ_triplets_at_q(triplets,
                              grid_point,
                              bz_grid_address,
                              bz_map,
                              map_triplets,
                              num_map_triplets,
                              mesh);
}
/* Core of tpk_get_ir_triplets_at_q: classify all grid points q' under the
 * little group of q (map_q), compute q'' = -q - q' for each irreducible q',
 * and collapse symmetry-equivalent (and, if `swappable`, q' <-> q''
 * exchanged) triplets into map_triplets.  Returns the number of
 * irreducible triplets. */
static size_t get_ir_triplets_at_q(size_t *map_triplets,
                                   size_t *map_q,
                                   int (*grid_address)[3],
                                   const size_t grid_point,
                                   const int mesh[3],
                                   const MatINT * rot_reciprocal,
                                   const int swappable)
{
  size_t i, j, num_grid, q_2, num_ir_q, num_ir_triplets, ir_grid_point;
  int mesh_double[3], is_shift[3];
  int address_double0[3], address_double1[3], address_double2[3];
  size_t *ir_grid_points, *third_q;
  double tolerance;
  double stabilizer_q[1][3];
  MatINT *rot_reciprocal_q;

  ir_grid_points = NULL;
  third_q = NULL;
  rot_reciprocal_q = NULL;

  /* tolerance for matching a rotated q back onto the grid */
  tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]);
  num_grid = mesh[0] * mesh[1] * (size_t)mesh[2];

  for (i = 0; i < 3; i++) {
    /* Only consider the gamma-point */
    is_shift[i] = 0;
    mesh_double[i] = mesh[i] * 2;
  }

  /* Search irreducible q-points (map_q) with a stabilizer */
  /* q */
  grid_point_to_address_double(address_double0, grid_point, mesh, is_shift);
  for (i = 0; i < 3; i++) {
    stabilizer_q[0][i] =
      (double)address_double0[i] / mesh_double[i] - (address_double0[i] > mesh[i]);
  }

  rot_reciprocal_q = kpt_get_point_group_reciprocal_with_q(rot_reciprocal,
                                                           tolerance,
                                                           1,
                                                           stabilizer_q);
  num_ir_q = kpt_get_dense_irreducible_reciprocal_mesh(grid_address,
                                                       map_q,
                                                       mesh,
                                                       is_shift,
                                                       rot_reciprocal_q);
  mat_free_MatINT(rot_reciprocal_q);
  rot_reciprocal_q = NULL;

  /* NOTE(review): malloc results are not checked before use. */
  third_q = (size_t*) malloc(sizeof(size_t) * num_ir_q);
  ir_grid_points = (size_t*) malloc(sizeof(size_t) * num_ir_q);
  /* collect representatives: map_q[i] == i marks an irreducible q' */
  num_ir_q = 0;
  for (i = 0; i < num_grid; i++) {
    if (map_q[i] == i) {
      ir_grid_points[num_ir_q] = i;
      num_ir_q++;
    }
  }

  for (i = 0; i < num_grid; i++) {
    map_triplets[i] = num_grid; /* When not found, map_triplets == num_grid */
  }

  /* q'' = -q - q' for every irreducible q'
   * (size_t loop index requires an OpenMP 3.0+ compiler) */
#pragma omp parallel for private(j, address_double1, address_double2)
  for (i = 0; i < num_ir_q; i++) {
    grid_point_to_address_double(address_double1,
                                 ir_grid_points[i],
                                 mesh,
                                 is_shift); /* q' */
    for (j = 0; j < 3; j++) { /* q'' */
      address_double2[j] = - address_double0[j] - address_double1[j];
    }
    third_q[i] = kgd_get_dense_grid_point_double_mesh(address_double2, mesh);
  }

  num_ir_triplets = 0;
  if (swappable) { /* search q1 <-> q2 */
    for (i = 0; i < num_ir_q; i++) {
      ir_grid_point = ir_grid_points[i];
      q_2 = third_q[i];
      if (map_triplets[map_q[q_2]] < num_grid) {
        /* the swapped partner was already chosen as representative */
        map_triplets[ir_grid_point] = map_triplets[map_q[q_2]];
      } else {
        map_triplets[ir_grid_point] = ir_grid_point;
        num_ir_triplets++;
      }
    }
  } else {
    for (i = 0; i < num_ir_q; i++) {
      ir_grid_point = ir_grid_points[i];
      map_triplets[ir_grid_point] = ir_grid_point;
      num_ir_triplets++;
    }
  }

  /* propagate each representative to every equivalent grid point */
#pragma omp parallel for
  for (i = 0; i < num_grid; i++) {
    map_triplets[i] = map_triplets[map_q[i]];
  }

  free(third_q);
  third_q = NULL;
  free(ir_grid_points);
  ir_grid_points = NULL;

  return num_ir_triplets;
}
/* Turn the irreducible representatives in map_triplets into explicit
 * (q, q', q'') index triplets expressed on the BZ grid; returns their
 * count.  bz_map translates doubled-mesh grid indices to BZ indices. */
static size_t get_BZ_triplets_at_q(size_t (*triplets)[3],
                                   const size_t grid_point,
                                   TPLCONST int (*bz_grid_address)[3],
                                   const size_t *bz_map,
                                   const size_t *map_triplets,
                                   const size_t num_map_triplets,
                                   const int mesh[3])
{
  size_t i, num_ir;
  int j, k;
  int bz_address[3][3], bz_address_double[3], bzmesh[3];
  size_t *ir_grid_points;

  ir_grid_points = NULL;

  for (i = 0; i < 3; i++) {
    bzmesh[i] = mesh[i] * 2;
  }

  /* collect representatives (map_triplets[i] == i);
   * NOTE(review): malloc result is not checked before use. */
  num_ir = 0;
  ir_grid_points = (size_t*) malloc(sizeof(size_t) * num_map_triplets);
  for (i = 0; i < num_map_triplets; i++) {
    if (map_triplets[i] == i) {
      ir_grid_points[num_ir] = i;
      num_ir++;
    }
  }

#pragma omp parallel for private(j, k, bz_address, bz_address_double)
  for (i = 0; i < num_ir; i++) {
    for (j = 0; j < 3; j++) {
      bz_address[0][j] = bz_grid_address[grid_point][j];
      bz_address[1][j] = bz_grid_address[ir_grid_points[i]][j];
      bz_address[2][j] = - bz_address[0][j] - bz_address[1][j];
    }
    /* fold q'', then q', then q back into the BZ until one succeeds */
    for (j = 2; j > -1; j--) {
      if (get_third_q_of_triplets_at_q(bz_address,
                                       j,
                                       bz_map,
                                       mesh,
                                       bzmesh) == 0) {
        break;
      }
    }
    for (j = 0; j < 3; j++) {
      for (k = 0; k < 3; k++) {
        bz_address_double[k] = bz_address[j][k] * 2;
      }
      triplets[i][j] =
        bz_map[kgd_get_dense_grid_point_double_mesh(bz_address_double, bzmesh)];
    }
  }

  free(ir_grid_points);
  ir_grid_points = NULL;

  return num_ir;
}
/* Shift bz_address[q_index] by the reciprocal-lattice translation (from
 * bz_search_space) that places it inside the BZ while minimizing the
 * summed |delta_g + G|; the row is updated in place.  Returns that
 * minimized sum (0 means the momentum balance is exact). */
static int get_third_q_of_triplets_at_q(int bz_address[3][3],
                                        const int q_index,
                                        const size_t *bz_map,
                                        const int mesh[3],
                                        const int bzmesh[3])
{
  int i, j, smallest_g, smallest_index, sum_g, delta_g[3];
  size_t prod_bzmesh;
  size_t bzgp[KPT_NUM_BZ_SEARCH_SPACE];
  int bz_address_double[3];

  prod_bzmesh = (size_t)bzmesh[0] * bzmesh[1] * bzmesh[2];

  /* fold the q_index component back into the first zone */
  modulo_i3(bz_address[q_index], mesh);
  /* residual G of the triplet sum, in units of the mesh */
  for (i = 0; i < 3; i++) {
    delta_g[i] = 0;
    for (j = 0; j < 3; j++) {
      delta_g[i] += bz_address[j][i];
    }
    delta_g[i] /= mesh[i];
  }

  /* BZ grid index of every candidate translation; prod_bzmesh marks
   * "outside the BZ" */
  for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) {
    for (j = 0; j < 3; j++) {
      bz_address_double[j] = (bz_address[q_index][j] +
                              bz_search_space[i][j] * mesh[j]) * 2;
    }
    bzgp[i] = bz_map[kgd_get_dense_grid_point_double_mesh(bz_address_double,
                                                          bzmesh)];
  }

  /* NOTE(review): this scan and its goto have no observable effect --
   * the `escape` label is the next statement after the loop either way. */
  for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) {
    if (bzgp[i] != prod_bzmesh) {
      goto escape;
    }
  }

escape:
  /* pick the in-BZ candidate with the smallest |delta_g + G| (< 4) */
  smallest_g = 4;
  smallest_index = 0;

  for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) {
    if (bzgp[i] < prod_bzmesh) { /* q'' is in BZ */
      sum_g = (abs(delta_g[0] + bz_search_space[i][0]) +
               abs(delta_g[1] + bz_search_space[i][1]) +
               abs(delta_g[2] + bz_search_space[i][2]));
      if (sum_g < smallest_g) {
        smallest_index = i;
        smallest_g = sum_g;
      }
    }
  }

  for (i = 0; i < 3; i++) {
    bz_address[q_index][i] += bz_search_space[smallest_index][i] * mesh[i];
  }

  return smallest_g;
}
/* Decode a linear grid-point index into a doubled grid address:
 * address_double[i] = 2 * address[i] + is_shift[i].  The axis that runs
 * fastest depends on GRID_ORDER_XYZ (default: x fastest). */
static void grid_point_to_address_double(int address_double[3],
                                         const size_t grid_point,
                                         const int mesh[3],
                                         const int is_shift[3])
{
  int k;
  int addr[3];
  size_t rem = grid_point;

#ifndef GRID_ORDER_XYZ
  /* x fastest: index = (z * mesh_y + y) * mesh_x + x */
  addr[0] = (int)(rem % mesh[0]);
  rem /= mesh[0];
  addr[1] = (int)(rem % mesh[1]);
  addr[2] = (int)(rem / mesh[1]);
#else
  /* z fastest: index = (x * mesh_y + y) * mesh_z + z */
  addr[2] = (int)(rem % mesh[2]);
  rem /= mesh[2];
  addr[1] = (int)(rem % mesh[1]);
  addr[0] = (int)(rem / mesh[1]);
#endif
  for (k = 0; k < 3; k++) {
    address_double[k] = 2 * addr[k] + is_shift[k];
  }
}
/* Componentwise mathematical modulo: v[i] <- v[i] mod m[i], with the
 * result always in [0, m[i]) even for negative inputs. */
static void modulo_i3(int v[3], const int m[3])
{
  int k;
  for (k = 0; k < 3; k++) {
    int r = v[k] % m[k];
    v[k] = (r < 0) ? r + m[k] : r;
  }
}
|
clock.c | /*
EXAMPLE SOURCE :
https://forums.developer.nvidia.com/t/reading-globaltimer-register-or-calling-clock-clock64-in-loop-prevent-concurrent-kernel-execution/48600/8
https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#special-registers-clock64
https://docs.nvidia.com/cuda/inline-ptx-assembly/index.html
https://wlandau.github.io/gpu/lectures/cudac-memory/cudac-memory.pdf
generating Asm PTX code
https://developer.nvidia.com/blog/cuda-pro-tip-view-assembly-code-correlation-nsight-visual-studio-edition/
https://stackoverflow.com/questions/20482686/how-to-get-the-assembly-code-of-a-cuda-kernel
$ nvcc -ptx -o kernel.ptx kernel.cu
.func (.param .b64 func_retval0) clock64(
)
{
.reg .b64 %rd<3>;
// inline asm
mov.u64 %rd1, %clock64;
// inline asm
mov.b64 %rd2, %rd1;
st.param.b64 [func_retval0+0], %rd2;
ret;
}
OPENMP:
https://www.openmp.org/spec-html/5.0/openmpsu161.html#x200-9710003.4.2
https://gcc.gnu.org/onlinedocs/libgomp/omp_005fget_005fwtick.html#omp_005fget_005fwtick
omp_get_wtick
COMPILATION:
clang -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda clock.c -o clock
THIS EXAMPLE WORKS, WITH SOME LITTLE EXTRA TIME
*/
#include <stdio.h>
#include <time.h>
#include <omp.h>
#define DELAY_VAL 10000000ULL // equiv to usec
/* Offload a busy-wait kernel to the default OpenMP target device, report
 * whether it ran on the host or the device, and print the elapsed CPU
 * clock ticks.  Returns omp_is_initial_device()'s result (0 == device). */
int main(void) {
  int isHost = 0;
  clock_t ck_start = clock();  /* clock()/clock_t come from <time.h> */

#pragma omp target map(from: isHost)
  {
    isHost = omp_is_initial_device();
    /* crude delay; NOTE(review): an empty loop may be elided by the
       optimizer -- confirm the timing is meaningful at -O2 */
    for (long long int i = 0; i < DELAY_VAL; i++)
      ;
  }

  if (isHost < 0) {
    printf("Runtime error, isHost=%d\n", isHost);
  }

  // CHECK: Target region executed on the device
  /* Fix: clock_t need not be `long`, so passing clock()-ck_start straight
   * to %ld was undefined on such platforms -- cast explicitly. */
  printf ("Kernel: %ld clicks.\n", (long)(clock() - ck_start));
  printf("Target region executed on the %s\n", isHost ? "host" : "device");
  return isHost;
}
|
dlthread_pool_test.c | #include "test.h"
#define N 1234567
#define NTHREADS 11
/* Thread-pool work item: treat ptr as int* and increment the pointed-to
 * value by one. */
static void myfunc(void * ptr)
{
  int *value = (int*)ptr;
  *value += 1;
}
/* Exercise the dlthread pool under every scheduling policy: every array
 * element is submitted as a work item that increments it, then each
 * thread verifies a[i] == i+1.  r[] holds per-thread pass/fail flags
 * (0 == success). */
sint_t test(void)
{
  int t;
  /* NOTE(review): ~4.7 MB automatic array -- assumes a large stack
   * limit; confirm against the test harness configuration. */
  int a[N];
  int r[NTHREADS];

#pragma omp parallel num_threads(NTHREADS)
  {
    int i, j;
    size_t myid = omp_get_thread_num();
    r[myid] = 0;
    /* iterate over all pool schedule types */
    for (j=0;j<__DLTHREAD_POOL_TS_TERM;++j) {
      dlthread_pool_init(NTHREADS);
      dlthread_pool_set_schedule(j);
      #pragma omp for
      for (i=0;i<N;++i) {
        a[i] = i;
        dlthread_pool_add(&myfunc,a+i);
      }
      /* wait for all queued work items to finish */
      dlthread_pool_finalize();
      #pragma omp for
      for (i=0;i<N;++i) {
        if (r[myid] == 0) {
          OMPTESTTRUE(a[i] == i+1,r[myid]);
        }
      }
    }
  }
  for (t=0;t<NTHREADS;++t) {
    TESTEQUALS(r[t],0,"%d");
  }
  return 0;
}
|
mmGPU.c | /*
Tempo sequencial:
real 0m31,842s
user 0m31,713s
sys 0m0,083s
real 0m30,929s
user 0m30,789s
sys 0m0,074s
real 0m31,569s
user 0m31,408s
sys 0m0,077s
real 0m30,880s
user 0m30,749s
sys 0m0,067s
real 0m31,120s
user 0m30,967s
sys 0m0,087s
Tempo Paralelo - Multicore:
real 0m9,663s
user 1m1,498s
sys 0m2,830s
real 0m9,897s
user 1m1,072s
sys 0m2,809s
real 0m9,767s
user 1m0,906s
sys 0m2,798s
real 0m9,857s
user 1m1,442s
sys 0m2,972s
real 0m9,709s
user 1m1,580s
sys 0m3,002s
Tempo paralelo - GPU
distribute
real 0m30,728s
user 0m30,683s
sys 0m0,184s
real 0m30,883s
user 0m30,952s
sys 0m0,122s
real 0m31,138s
user 0m31,304s
sys 0m0,087s
real 0m31,675s
user 0m31,759s
sys 0m0,050s
real 0m31,450s
user 0m31,600s
sys 0m0,043s
distribute parallel for
real 0m9,370s
user 0m54,341s
sys 0m0,196s
real 0m9,421s
user 0m55,248s
sys 0m0,227s
real 0m8,649s
user 0m59,349s
sys 0m0,220s
real 0m8,653s
user 0m59,911s
sys 0m0,234s
real 0m8,644s
user 0m59,108s
sys 0m0,195s
distribute parallel for simd
real 0m9,144s
user 0m56,485s
sys 0m0,878s
real 0m9,119s
user 0m58,920s
sys 0m0,247s
real 0m8,835s
user 0m57,115s
sys 0m0,208s
real 0m8,842s
user 0m57,008s
sys 0m0,170s
real 0m8,984s
user 0m56,533s
sys 0m0,232s
*/
#include <stdio.h>
#include <stdlib.h>
/* Dense square matrix multiply c = a * b (row-major, width x width),
 * offloaded with OpenMP target + teams distribute parallel for. */
void mm(double *a, double *b, double *c, int width)
{
#pragma omp target map(to \
                       : a [0:(width * width)], b [0:(width * width)]) map(from \
                                                                           : c [0:(width * width)])
#pragma omp teams distribute parallel for
    for (int row = 0; row < width; row++) {
        for (int col = 0; col < width; col++) {
            /* dot product of row `row` of a with column `col` of b */
            double acc = 0;
            for (int k = 0; k < width; k++) {
                acc += a[row * width + k] * b[k * width + col];
            }
            c[row * width + col] = acc;
        }
    }
}
/* Driver: builds two width x width matrices (a[i][j] = i, b[i][j] = j),
 * multiplies them via mm(), and releases all buffers.
 * Fixes: unchecked mallocs were dereferenced; buffers were never freed;
 * no explicit return value. */
int main()
{
    int width = 2000;
    double *a = (double *)malloc(width * width * sizeof(double));
    double *b = (double *)malloc(width * width * sizeof(double));
    double *c = (double *)malloc(width * width * sizeof(double));

    /* bail out instead of dereferencing NULL on allocation failure */
    if (a == NULL || b == NULL || c == NULL)
    {
        fprintf(stderr, "allocation of %d x %d matrices failed\n", width, width);
        free(a);
        free(b);
        free(c);
        return 1;
    }

#pragma omp parallel for collapse(2)
    for (int i = 0; i < width; i++)
    {
        for (int j = 0; j < width; j++)
        {
            a[i * width + j] = i;
            b[i * width + j] = j;
            c[i * width + j] = 0;
        }
    }

    mm(a, b, c, width);

    /* the OS would reclaim these at exit, but free explicitly so leak
     * checkers stay quiet */
    free(a);
    free(b);
    free(c);
    return 0;
}
app_phg.c | /**
* @file app_phg.c
* @brief app of phg
*
* Single-vector and multi-vector structures are unified.  (Repaired from
* mojibake GBK comment: 单向量与多向量结构是统一的.)
*
* @author Yu Li, liyu@tjufe.edu.cn
*
* Created: 2020/12/13
* Revision: none
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <memory.h>
#include "app_phg.h"
#if OPS_USE_PHG
#ifdef DEBUG
#undef DEBUG
#endif
#define DEBUG 0
/* Expose the raw contiguous storage of a PHG vector (no copy). */
static void VECGetArray(VEC *x, double **x_array)
{
    assert(x_array!=NULL);
    *x_array = x->data;
    return;
}

/* Counterpart of VECGetArray: hand the (possibly replaced) buffer back
 * to the vector and null the caller's pointer. */
static void VECRestoreArray(VEC *x, double **x_array)
{
    assert(x_array!=NULL);
    x->data = *x_array;
    *x_array = NULL;
    return;
}

/* Query local/global row counts and the number of column vectors; any
 * out-parameter may be NULL to skip it. */
static void VECGetSizes(VEC *x, int *local_size, int *global_size, int *ncols)
{
    if (local_size !=NULL) *local_size  = x->map->nlocal;
    if (global_size!=NULL) *global_size = x->map->nglobal;
    if (ncols      !=NULL) *ncols       = x->nvec;
    return;
}
/* y := A * x using only the locally stored block of the packed matrix;
 * the off-process contribution is added separately (see
 * phgMatDotMultiVecRemote / phgMatDotMultiVec).  x and y hold x->nvec
 * column vectors stored contiguously, column-major. */
static void phgMatDotMultiVecLocal (MAT *A, VEC *x, VEC *y)
{
    assert(A->type != PHG_DESTROYED);
    if (!A->assembled)
        phgMatAssemble(A);
    phgMatPack(A);
    int nrows, ncols, *rowsSE, row;
    nrows = A->rmap->nlocal;
    ncols = A->cmap->nlocal;
    assert(x->map->nlocal==ncols);
    assert(y->map->nlocal==nrows);
    /* rows start and end for local data
     * the type of A->packed_ind[0] is size_t */
    /* NOTE(review): malloc unchecked; the size_t -> int narrowing assumes
     * the local nonzero count fits in int. */
    rowsSE = malloc((nrows+1)*sizeof(int));
    for (row = 0; row < nrows+1; ++row) {
        rowsSE[row] = (int)A->packed_ind[row];
    }

#if DEBUG
    /* dump the packed CSR structure of one rank (DEBUG is 0 above) */
    int idx, nvec, my_rank;
    my_rank = A->cmap->rank;
    if (my_rank==PRINT_RANK) {
        printf("[%d]: nrows = %d, ncols = %d, nnz_d = %d, nnz_o = %d\n",
               my_rank, nrows, ncols, A->nnz_d, A->nnz_o);
        //printf("size_t = %d, MKL_INT = %d, int = %d\n", sizeof(size_t),sizeof(MKL_INT),sizeof(int));
        for (row = 0; row < nrows+1; ++row) {
            printf("[%d]: rowsSE %d\n", my_rank, rowsSE[row]);
        }
        for (row = 0; row < nrows; ++row) {
            printf("[%d]: row = %d, SE = %d,%d, nnz = %d\n",
                   my_rank, row, rowsSE[row], rowsSE[row+1],rowsSE[row+1]-rowsSE[row]);
            for (idx = rowsSE[row]; idx < rowsSE[row+1]; ++idx) {
                printf("%.4e (%d)\t", A->packed_data[idx], A->packed_cols[idx]);
            }
            printf("\n");
        }
    }
#endif

#if OPS_USE_INTEL_MKL
    //mkl_set_num_threads_local(MKL_NUM_THREADS);
    //#pragma omp parallel num_threads(MKL_NUM_THREADS)
    //{
    //   int id = omp_get_thread_num();
    //   printf("%d thread\n",id);
    //}
    sparse_matrix_t csrA;
    struct matrix_descr descr;
    descr.type = SPARSE_MATRIX_TYPE_GENERAL;
    /*
     * sparse_status_t mkl_sparse_d_create_csr (
     *    sparse_matrix_t *A,
     *    const sparse_index_base_t indexing,
     *    const MKL_INT rows, const MKL_INT cols,
     *    MKL_INT *rows_start, MKL_INT *rows_end, MKL_INT *col_indx, double *values);
     * sparse_status_t mkl_sparse_destroy (sparse_matrix_t A);
     * sparse_status_t mkl_sparse_d_mm (
     *    const sparse_operation_t operation,
     *    const double alpha,
     *    const sparse_matrix_t A, const struct matrix_descr descr, const sparse_layout_t layout,
     *    const double *B, const MKL_INT columns, const MKL_INT ldb,
     *    const double beta, double *C, const MKL_INT ldc);
     */
    /* in process */
    mkl_sparse_d_create_csr (
        &csrA,
        SPARSE_INDEX_BASE_ZERO,
        nrows, ncols,
        rowsSE, rowsSE+1, A->packed_cols, A->packed_data);
#if OPS_USE_OMP
    /* split the x->nvec right-hand sides across OMP_NUM_THREADS threads */
#pragma omp parallel num_threads(OMP_NUM_THREADS)
    {
        int id, length, offset;
        id = omp_get_thread_num();
        length = x->nvec/OMP_NUM_THREADS;
        offset = length*id;
        if (id < x->nvec%OMP_NUM_THREADS) {
            ++length; offset += id;
        }
        else {
            offset += x->nvec%OMP_NUM_THREADS;
        }
        mkl_sparse_d_mm (
            SPARSE_OPERATION_NON_TRANSPOSE,
            1.0,
            csrA, descr, SPARSE_LAYOUT_COLUMN_MAJOR,
            x->data+offset*ncols, length, ncols,
            0.0, y->data+offset*nrows, nrows);
    }
#else
    mkl_sparse_d_mm (
        SPARSE_OPERATION_NON_TRANSPOSE,
        1.0,
        csrA, descr, SPARSE_LAYOUT_COLUMN_MAJOR,
        x->data, x->nvec, ncols,
        0.0, y->data, nrows);
#endif
    mkl_sparse_destroy (csrA);
#else
#if 0
    /* simple row-wise CSR kernel, one right-hand side per iteration */
    int *cols = A->packed_cols, i;
    memset(y->data,0,nrows*x->nvec*sizeof(double));
#if OPS_USE_OMP
#pragma omp parallel for schedule(static) num_threads(OMP_NUM_THREADS)
#endif
    for (i = 0; i < x->nvec; ++i) {
        double *dm, *dy, *dx;
        dm = A->packed_data;
        dy = y->data+nrows*i;
        dx = x->data+ncols*i;
        int j, k;
        for (k = 0; k < nrows; ++k) {
            for (j = rowsSE[k]; j < rowsSE[k+1]; ++j) {
                dy[k] += (*dm++)*dx[cols[j]];
            }
        }
    }
#else
    memset(y->data,0,nrows*x->nvec*sizeof(double));
    /* PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat A,Mat B,Mat C) */
    /* unrolled kernel processing four right-hand sides per sweep */
    double r1,r2,r3,r4,*c1,*c2,*c3,*c4,aatmp;
    const double *aa,*b1,*b2,*b3,*b4,*av;
    const int *aj;
    int cn=x->nvec,bm=ncols,am=nrows;
    int am4=4*am,bm4=4*bm,col,i,j,n,ajtmp;
    av = A->packed_data;
    b1 = x->data; b2 = b1 + bm; b3 = b2 + bm; b4 = b3 + bm;
    c1 = y->data; c2 = c1 + am; c3 = c2 + am; c4 = c3 + am;
    /* 4 cols per each iteration */
    for (col=0; col<cn-4; col += 4) { /* over columns of C */
        for (i=0; i<am; i++) { /* over rows of C in those columns */
            r1 = r2 = r3 = r4 = 0.0;
            n = rowsSE[i+1] - rowsSE[i];
            aj = A->packed_cols + rowsSE[i];
            aa = av + rowsSE[i];
            for (j=0; j<n; j++) {
                aatmp = aa[j]; ajtmp = aj[j];
                r1 += aatmp*b1[ajtmp];
                r2 += aatmp*b2[ajtmp];
                r3 += aatmp*b3[ajtmp];
                r4 += aatmp*b4[ajtmp];
            }
            c1[i] += r1;
            c2[i] += r2;
            c3[i] += r3;
            c4[i] += r4;
        }
        b1 += bm4; b2 += bm4; b3 += bm4; b4 += bm4;
        c1 += am4; c2 += am4; c3 += am4; c4 += am4;
    }
    for (; col<cn; col++) { /* over extra columns of C */
        for (i=0; i<am; i++) { /* over rows of C in those columns */
            r1 = 0.0;
            n = rowsSE[i+1] - rowsSE[i];
            aj = A->packed_cols + rowsSE[i];
            aa = av + rowsSE[i];
            for (j=0; j<n; j++) {
                r1 += aa[j]*b1[aj[j]];
            }
            c1[i] += r1;
        }
        b1 += bm;
        c1 += am;
    }
#endif
#endif
    free(rowsSE);
    return;
}
/* y += A_offdiag * x: adds the off-process part of the product, column by
 * column, overlapping the scatter of column col+1 with the multiply of
 * column col (double buffering).  Serial runs (nprocs == 1) have no
 * remote block and return immediately. */
static void phgMatDotMultiVecRemote (MAT *A, VEC *x, VEC *y)
{
    INT i, j, n, *pc, col;
    FLOAT *x_data = NULL, *y_data = NULL, *dbl_ptr, *pd, *v, beta;
    FLOAT *offp_data = NULL, *offp_data2 = NULL;

    assert(y != NULL && x != NULL);
    if (!x->assembled)
        phgVecAssemble(x);
    if (!y->assembled)
        phgVecAssemble(y);
    assert(A->type != PHG_DESTROYED);
    if (!A->assembled)
        phgMatAssemble(A);
    phgMatPack(A);

    if (A->cmap->nprocs > 1) {
        /* offp_data is being filled; offp_data2 holds completed data */
        offp_data = phgAlloc(A->cinfo->rsize * sizeof(*offp_data ));
        if (x->nvec > 1)
            offp_data2 = phgAlloc(A->cinfo->rsize * sizeof(*offp_data2));
        x_data = x->data; y_data = y->data;
        phgMapScatterBegin(A->cinfo, 1, x_data, offp_data);
        phgMapScatterEnd  (A->cinfo, 1, x_data, offp_data);
        for (col = 1; col < x->nvec; ++col) {
            dbl_ptr = offp_data; offp_data = offp_data2; offp_data2 = dbl_ptr;
            x_data += x->map->nlocal;
            phgMapScatterBegin(A->cinfo, 1, x_data, offp_data);
            /* multiply with remote data */
            for (i = 0, v = y_data; i < A->rmap->nlocal; i++) {
                /* packed rows nlocal..2*nlocal-1 hold the off-process part */
                j = A->rmap->nlocal + i;
                pc = A->packed_cols + A->packed_ind[j];
                pd = A->packed_data + A->packed_ind[j];
                n = (INT)(A->packed_ind[j + 1] - A->packed_ind[j]);
                if (n == 0) {
                    v++;
                    continue;
                }
                beta = pd[0] * offp_data2[pc[0]];
                for (j = 1; j < n; j++)
                    beta += pd[j] * offp_data2[pc[j]];
                *(v++) += beta;
            }
            y_data += y->map->nlocal;
            phgMapScatterEnd(A->cinfo, 1, x_data, offp_data);
        }
        /* last column: its scatter has already completed */
        for (i = 0, v = y_data; i < A->rmap->nlocal; i++) {
            j = A->rmap->nlocal + i;
            pc = A->packed_cols + A->packed_ind[j];
            pd = A->packed_data + A->packed_ind[j];
            n = (INT)(A->packed_ind[j + 1] - A->packed_ind[j]);
            if (n == 0) {
                v++;
                continue;
            }
            beta = pd[0] * offp_data[pc[0]];
            for (j = 1; j < n; j++)
                beta += pd[j] * offp_data[pc[j]];
            *(v++) += beta;
        }
        phgFree(offp_data);
        if (x->nvec > 1)
            phgFree(offp_data2);
    }
    return;
}
/* y = A * x for all columns: starts the scatter of the first column,
 * computes the local block (phgMatDotMultiVecLocal) while it is in
 * flight, then folds in the off-process block column by column with the
 * same double-buffering scheme as phgMatDotMultiVecRemote. */
static void phgMatDotMultiVec (MAT *A, VEC *x, VEC *y)
{
    INT i, j, n, *pc, col;
    FLOAT *x_data = NULL, *y_data = NULL, *dbl_ptr, *pd, *v, beta;
    FLOAT *offp_data = NULL, *offp_data2 = NULL;

    assert(y != NULL && x != NULL);
    if (!x->assembled)
        phgVecAssemble(x);
    if (!y->assembled)
        phgVecAssemble(y);
    assert(A->type != PHG_DESTROYED);
    if (!A->assembled)
        phgMatAssemble(A);
    phgMatPack(A);

    if (A->cmap->nprocs > 1) {
        offp_data = phgAlloc(A->cinfo->rsize * sizeof(*offp_data ));
        x_data = x->data; y_data = y->data;
        /* overlap this scatter with the local multiply below */
        phgMapScatterBegin(A->cinfo, 1, x_data, offp_data);
        if (x->nvec > 1)
            offp_data2 = phgAlloc(A->cinfo->rsize * sizeof(*offp_data2));
    }
    /* multiply with local data */
    phgMatDotMultiVecLocal (A, x, y);
    if (A->cmap->nprocs > 1) {
        phgMapScatterEnd  (A->cinfo, 1, x_data, offp_data);
        for (col = 1; col < x->nvec; ++col) {
            dbl_ptr = offp_data; offp_data = offp_data2; offp_data2 = dbl_ptr;
            x_data += x->map->nlocal;
            phgMapScatterBegin(A->cinfo, 1, x_data, offp_data);
            /* multiply with remote data */
            for (i = 0, v = y_data; i < A->rmap->nlocal; i++) {
                j = A->rmap->nlocal + i;
                pc = A->packed_cols + A->packed_ind[j];
                pd = A->packed_data + A->packed_ind[j];
                n = (INT)(A->packed_ind[j + 1] - A->packed_ind[j]);
                if (n == 0) {
                    v++;
                    continue;
                }
                beta = pd[0] * offp_data2[pc[0]];
                for (j = 1; j < n; j++)
                    beta += pd[j] * offp_data2[pc[j]];
                *(v++) += beta;
            }
            y_data += y->map->nlocal;
            phgMapScatterEnd(A->cinfo, 1, x_data, offp_data);
        }
        /* last column: its scatter has already completed */
        for (i = 0, v = y_data; i < A->rmap->nlocal; i++) {
            j = A->rmap->nlocal + i;
            pc = A->packed_cols + A->packed_ind[j];
            pd = A->packed_data + A->packed_ind[j];
            n = (INT)(A->packed_ind[j + 1] - A->packed_ind[j]);
            if (n == 0) {
                v++;
                continue;
            }
            beta = pd[0] * offp_data[pc[0]];
            for (j = 1; j < n; j++)
                beta += pd[j] * offp_data[pc[j]];
            *(v++) += beta;
        }
        phgFree(offp_data);
        if (x->nvec > 1)
            phgFree(offp_data2);
    }
    return;
}
/* No-op: no matrix viewer is hooked up for PHG matrices. */
static void MatView (MAT *mat, struct OPS_ *ops)
{
    return;
}

/* multi-vec */
/* Create a multi-vector with num_vec columns laid out on the column map
 * of src_mat. */
static void MultiVecCreateByMat (VEC **des_vec, int num_vec, MAT *src_mat, struct OPS_ *ops)
{
    *des_vec = phgMapCreateVec(src_mat->cmap, num_vec);
    return;
}

/* Destroy a multi-vector (num_vec is implied by the VEC itself). */
static void MultiVecDestroy (VEC **des_vec, int num_vec, struct OPS_ *ops)
{
    phgVecDestroy(des_vec);
    return;
}

/* Print columns [start, end) by wrapping the raw storage in a LAPACKVEC
 * and delegating to the LAPACK ops. */
static void MultiVecView (VEC *x, int start, int end, struct OPS_ *ops)
{
    int x_nrows, x_ncols; double *x_array;
    VECGetArray(x,&x_array);
    VECGetSizes(x,&x_nrows,NULL,&x_ncols);
    LAPACKVEC x_vec;
    x_vec.nrows = x_nrows; x_vec.ncols = x_ncols;
    x_vec.ldd   = x_nrows; x_vec.data  = x_array;
    /* NOTE(review): unlike the sibling functions, x_array is never handed
     * back via VECRestoreArray here -- presumably harmless since the data
     * pointer is unchanged, but confirm. */
    ops->lapack_ops->MultiVecView((void**)&x_vec,start,end,ops->lapack_ops);
    return;
}

/* Local (per-process) inner products between column ranges of x and y,
 * computed by the LAPACK ops on the raw storage; results go into
 * inner_prod with leading dimension ldIP.  Only the multi-vector case
 * (is_vec == 0) is supported. */
static void MultiVecLocalInnerProd (char nsdIP,
        VEC *x, VEC *y, int is_vec, int *start, int *end,
        double *inner_prod, int ldIP, struct OPS_ *ops)
{
    assert(is_vec==0);
    double *x_array, *y_array;
    int x_nrows, x_ncols, y_nrows, y_ncols;
    VECGetArray(x,&x_array); VECGetArray(y,&y_array);
    VECGetSizes(x,&x_nrows,NULL,&x_ncols);
    VECGetSizes(y,&y_nrows,NULL,&y_ncols);
    LAPACKVEC x_vec, y_vec;
    x_vec.nrows = x_nrows; y_vec.nrows = y_nrows;
    x_vec.ncols = x_ncols; y_vec.ncols = y_ncols;
    x_vec.ldd   = x_nrows; y_vec.ldd   = y_nrows;
    x_vec.data  = x_array; y_vec.data  = y_array;
    ops->lapack_ops->MultiVecLocalInnerProd(nsdIP,
            (void**)&x_vec,(void**)&y_vec,is_vec,
            start,end,inner_prod,ldIP,ops->lapack_ops);
    VECRestoreArray(x,&x_array); VECRestoreArray(y,&y_array);
    return;
}

/* Randomize columns [start, end) by temporarily narrowing the vector's
 * view to that window and calling phgVecRandomize, then restoring it. */
static void MultiVecSetRandomValue (VEC *x, int start, int end, struct OPS_ *ops)
{
    int nvec = x->nvec;
    double *data = x->data;
    x->nvec  = end-start;
    x->data += start*x->map->nlocal;
    phgVecRandomize(x, rand());
    x->nvec = nvec;
    x->data = data;
    return;
}
/* y[:, start[1]:end[1]] = alpha * x[:, start[0]:end[0]] + beta * y[...].
 * x == NULL means a pure scaling of y; overlapping column ranges of the
 * same vector are rejected by assertion. */
static void MultiVecAxpby (double alpha, VEC *x,
        double beta, VEC *y, int *start, int *end, struct OPS_ *ops)
{
    assert(end[0]-start[0]==end[1]-start[1]);
    if (x==y) assert(end[0]<=start[1]||end[1]<=start[0]);
    int y_nrows, y_ncols; double *y_array;
    VECGetArray(y,&y_array);
    VECGetSizes(y,&y_nrows,NULL,&y_ncols);
    LAPACKVEC y_vec;
    y_vec.nrows = y_nrows; y_vec.ncols = y_ncols;
    y_vec.ldd   = y_nrows; y_vec.data  = y_array;
    if (x==NULL) {
        ops->lapack_ops->MultiVecAxpby(alpha,
                NULL,beta,(void**)&y_vec,start,end,ops->lapack_ops);
    }
    else {
        int x_nrows, x_ncols; double *x_array;
        VECGetArray(x,&x_array);
        VECGetSizes(x,&x_nrows,NULL,&x_ncols);
        LAPACKVEC x_vec;
        x_vec.nrows = x_nrows; x_vec.ncols = x_ncols;
        x_vec.ldd   = x_nrows; x_vec.data  = x_array;
        ops->lapack_ops->MultiVecAxpby(alpha,
                (void**)&x_vec,beta,(void**)&y_vec,start,end,ops->lapack_ops);
        VECRestoreArray(x, &x_array);
    }
    VECRestoreArray(y, &y_array);
    return;
}
/* y[:, start[1]:end[1]] = mat * x[:, start[0]:end[0]]: temporarily narrow
 * both vectors' views to the selected column windows, run the packed
 * multiply, then restore the original views. */
static void MatDotMultiVec (MAT *mat, VEC *x,
        VEC *y, int *start, int *end, struct OPS_ *ops)
{
    assert(end[0]-start[0]==end[1]-start[1]);
    assert(end[0]-start[0]>=0);
    if (end[0]-start[0]==0) return;
    /* save the full-vector view so it can be restored afterwards */
    int nvec_x = x->nvec, nvec_y = y->nvec;
    double *data_x = x->data, *data_y = y->data;
#if 1
    x->nvec  = end[0]-start[0]; y->nvec = end[1]-start[1];
    x->data += start[0]*x->map->nlocal;
    y->data += start[1]*y->map->nlocal;
    if (0) {
        /* y = mat x */
        phgMatDotMultiVecLocal (mat, x, y);
        /* y += mat x */
        phgMatDotMultiVecRemote(mat, x, y);
    }
    else {
        phgMatDotMultiVec(mat, x, y);
    }
#else
    /* fallback: one phgMatVec call per column */
    int i;
    x->nvec  = 1; y->nvec = 1;
    x->data += start[0]*x->map->nlocal;
    y->data += start[1]*y->map->nlocal;
    for (i = 0; i < end[0]-start[0]; ++i) {
        phgMatVec(MAT_OP_N, 1.0, mat, x, 0.0, &y);
        x->data += x->map->nlocal;
        y->data += y->map->nlocal;
    }
#endif
    x->nvec = nvec_x; y->nvec = nvec_y;
    x->data = data_x; y->data = data_y;
    return;
}
/* y[:, start[1]:end[1]] = mat^T * x[:, start[0]:end[0]], one column per
 * phgMatVec(MAT_OP_T, ...) call; the vectors' views are narrowed to a
 * single column, advanced per iteration, and restored at the end. */
static void MatTransDotMultiVec (MAT *mat, VEC *x,
        VEC *y, int *start, int *end, struct OPS_ *ops)
{
    assert(end[0]-start[0]==end[1]-start[1]);
    int i;
    int nvec_x = x->nvec, nvec_y = y->nvec;
    double *data_x = x->data, *data_y = y->data;
    x->nvec  = 1; y->nvec = 1;
    x->data += start[0]*x->map->nlocal;
    y->data += start[1]*y->map->nlocal;
    for (i = 0; i < end[0]-start[0]; ++i) {
        phgMatVec(MAT_OP_T, 1.0, mat, x, 0.0, &y);
        x->data += x->map->nlocal;
        y->data += y->map->nlocal;
    }
    x->nvec = nvec_x; y->nvec = nvec_y;
    x->data = data_x; y->data = data_y;
    return;
}
/* Linear combination of column ranges: y <- y * diag-ish(beta) + x * coef,
 * delegated to the LAPACK ops on the raw storage (coef has leading
 * dimension ldc, beta stride incb).  x == NULL scales y only.  Only the
 * multi-vector case (is_vec == 0) is supported; overlapping ranges of the
 * same vector are rejected. */
static void MultiVecLinearComb (VEC *x, VEC *y, int is_vec,
        int    *start, int  *end,
        double *coef , int  ldc ,
        double *beta , int  incb, struct OPS_ *ops)
{
    assert(is_vec==0);
    if (x==y) assert(end[0]<=start[1]||end[1]<=start[0]);
    int y_nrows, y_ncols; double *y_array;
    VECGetArray(y,&y_array);
    VECGetSizes(y,&y_nrows,NULL,&y_ncols);
    LAPACKVEC y_vec;
    y_vec.nrows = y_nrows; y_vec.ncols = y_ncols;
    y_vec.ldd   = y_nrows; y_vec.data  = y_array;
    if (x==NULL) {
        ops->lapack_ops->MultiVecLinearComb(
                NULL,(void**)&y_vec,is_vec,
                start,end,coef,ldc,beta,incb,ops->lapack_ops);
    }
    else {
        int x_nrows, x_ncols;
        double *x_array;
        LAPACKVEC x_vec;
        VECGetArray(x,&x_array);
        VECGetSizes(x,&x_nrows,NULL,&x_ncols);
        x_vec.nrows = x_nrows; x_vec.ncols = x_ncols;
        x_vec.ldd   = x_nrows; x_vec.data  = x_array;
        ops->lapack_ops->MultiVecLinearComb(
                (void**)&x_vec,(void**)&y_vec,is_vec,
                start,end,coef,ldc,beta,incb,ops->lapack_ops);
        VECRestoreArray(x, &x_array);
    }
    VECRestoreArray(y, &y_array);
    return;
}
/* Encapsulation */
static void PHG_MatView (void *mat, struct OPS_ *ops)
{
MatView((MAT*)mat,ops);
return;
}
/* multi-vec */
static void PHG_MultiVecCreateByMat (void ***des_vec, int num_vec, void *src_mat, struct OPS_ *ops)
{
MultiVecCreateByMat ((VEC**)des_vec,num_vec,(MAT*)src_mat,ops);
return;
}
static void PHG_MultiVecDestroy (void ***des_vec, int num_vec, struct OPS_ *ops)
{
MultiVecDestroy ((VEC**)des_vec,num_vec,ops);
return;
}
static void PHG_MultiVecView (void **x, int start, int end, struct OPS_ *ops)
{
MultiVecView ((VEC*)x,start,end,ops);
return;
}
static void PHG_MultiVecLocalInnerProd (char nsdIP,
void **x, void **y, int is_vec, int *start, int *end,
double *inner_prod, int ldIP, struct OPS_ *ops)
{
MultiVecLocalInnerProd (nsdIP,
(VEC*)x,(VEC*)y,is_vec,start,end,
inner_prod,ldIP,ops);
return;
}
static void PHG_MultiVecSetRandomValue (void **x, int start, int end, struct OPS_ *ops)
{
MultiVecSetRandomValue ((VEC*)x,start,end,ops);
return;
}
/* Adapter: y = alpha*x + beta*y over the given column ranges. */
static void PHG_MultiVecAxpby (double alpha, void **x,
		double beta, void **y, int *start, int *end, struct OPS_ *ops)
{
	MultiVecAxpby(alpha, (VEC *)x, beta, (VEC *)y, start, end, ops);
}
/* Adapter: y = mat * x over the given column ranges. */
static void PHG_MatDotMultiVec (void *mat, void **x,
		void **y, int *start, int *end, struct OPS_ *ops)
{
	MatDotMultiVec((MAT *)mat, (VEC *)x, (VEC *)y, start, end, ops);
}
/* Adapter: y = mat^T * x over the given column ranges. */
static void PHG_MatTransDotMultiVec (void *mat, void **x,
		void **y, int *start, int *end, struct OPS_ *ops)
{
	MatTransDotMultiVec((MAT *)mat, (VEC *)x, (VEC *)y, start, end, ops);
}
/* Adapter: forward the untyped handles to the typed MultiVecLinearComb. */
static void PHG_MultiVecLinearComb (
		void **x , void **y, int is_vec,
		int *start, int *end,
		double *coef , int ldc ,
		double *beta , int incb, struct OPS_ *ops)
{
	MultiVecLinearComb((VEC *)x, (VEC *)y, is_vec,
			start, end, coef, ldc, beta, incb, ops);
}
/* Register a command-line option with PHG's option machinery instead of
 * parsing argv directly (argc/argv/ops are unused: PHG owns the actual
 * parsing pass). type: 'i' = int, 'f' = double, 's' = string; anything
 * else is ignored. Always returns 0.
 * The locals are static because phgOptionsRegister* retains the pointer
 * it is given until PHG parses the options.
 * NOTE(review): for 's', PHG is handed &str_value (a private static), so
 * it will rewrite str_value rather than the caller's buffer in `value` --
 * confirm whether the parsed string is ever propagated back; the
 * DefaultGetOptionFromCommandLine fallback below is disabled. */
int PHG_GetOptionFromCommandLine (
		const char *name, char type, void *value,
		int argc, char* argv[], struct OPS_ *ops)
{
	static int *int_value; static double *dbl_value; static char *str_value;
	switch (type) {
		case 'i':
			int_value = (int*)value;
			phgOptionsRegisterInt(name, NULL, int_value);
			break;
		case 'f':
			dbl_value = (double*)value;
			phgOptionsRegisterFloat(name, NULL, dbl_value);
			break;
		case 's':
			str_value = (char*)value;
			phgOptionsRegisterString(name, NULL, &str_value);
			//DefaultGetOptionFromCommandLine(name, type, value, argc, argv, ops);
			break;
		default:
			break;
	}
	return 0;
}
/* Install the PHG adapters defined above into an OPS_ dispatch table. */
void OPS_PHG_Set (struct OPS_ *ops)
{
	/* NOTE: PHG_GetOptionFromCommandLine is intentionally not wired in. */
	ops->Printf  = DefaultPrintf;
	ops->MatView = PHG_MatView;
	/* multi-vector operations */
	ops->MultiVecCreateByMat    = PHG_MultiVecCreateByMat;
	ops->MultiVecDestroy        = PHG_MultiVecDestroy;
	ops->MultiVecView           = PHG_MultiVecView;
	ops->MultiVecLocalInnerProd = PHG_MultiVecLocalInnerProd;
	ops->MultiVecSetRandomValue = PHG_MultiVecSetRandomValue;
	ops->MultiVecAxpby          = PHG_MultiVecAxpby;
	ops->MatDotMultiVec         = PHG_MatDotMultiVec;
	ops->MatTransDotMultiVec    = PHG_MatTransDotMultiVec;
	ops->MultiVecLinearComb     = PHG_MultiVecLinearComb;
	/* multi grid: nothing installed yet */
}
#endif
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for timevals, normalizing so that tv_usec is
 * non-negative. *y is used as scratch and may be modified.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Borrow whole seconds into y until x->tv_usec >= y->tv_usec. */
	if (x->tv_usec < y->tv_usec) {
		int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * borrow;
		y->tv_sec += borrow;
	}
	/* Carry excess microseconds from the difference into seconds. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}
	/* tv_usec is now certainly positive. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;
	return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: runs TESTS timed sweeps of the order-4 (25-point)
 * axis-symmetric variable-coefficient stencil and reports the best time.
 * argv: [1..3] interior Nx Ny Nz, [4] time steps Nt. */
int main(int argc, char *argv[])
{
	int t, i, j, k, m, test;
	int Nx, Ny, Nz, Nt;

	/* Grid size = requested interior + 2*4 halo layers per axis.
	 * Defaults keep the run well-defined when arguments are omitted
	 * (previously Nx..Nt were read uninitialized in that case). */
	Nx = Ny = Nz = 32 + 8;
	Nt = 10;
	if (argc > 3) {
		Nx = atoi(argv[1])+8;
		Ny = atoi(argv[2])+8;
		Nz = atoi(argv[3])+8;
	}
	if (argc > 4)
		Nt = atoi(argv[4]);

	// allocate the arrays: A holds two time planes, coef the 13 coefficients
	double ****A = (double ****) malloc(sizeof(double***)*2);
	for(m=0; m<2;m++){
		A[m] = (double ***) malloc(sizeof(double**)*Nz);
		for(i=0; i<Nz; i++){
			A[m][i] = (double**) malloc(sizeof(double*)*Ny);
			for(j=0;j<Ny;j++){
				A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
			}
		}
	}
	double ****coef = (double ****) malloc(sizeof(double***)*13);
	for(m=0; m<13;m++){
		coef[m] = (double ***) malloc(sizeof(double**)*Nz);
		for(i=0; i<Nz; i++){
			coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
			for(j=0;j<Ny;j++){
				coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
			}
		}
	}

	// tile size information, including extra element to decide the list length
	int *tile_size = (int*) malloc(sizeof(int));
	tile_size[0] = -1;
	// The list is modified here before source-to-source transformations
	tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
	tile_size[0] = 8;
	tile_size[1] = 8;
	tile_size[2] = 4;
	tile_size[3] = 512;
	tile_size[4] = -1;

	// for timekeeping
	int ts_return = -1;
	struct timeval start, end, result;
	double tdiff = 0.0, min_tdiff = 1.e100;

	const int BASE = 1024;

	/* Initialize the whole grid, indices 0 included, so the stencil's
	 * halo reads are never of indeterminate values (the old loops
	 * started at 1 and never touched time plane 1). */
	srand(42);
	for (i = 0; i < Nz; i++) {
		for (j = 0; j < Ny; j++) {
			for (k = 0; k < Nx; k++) {
				A[0][i][j][k] = 1.0 * (rand() % BASE);
			}
		}
	}
	/* Second time plane starts as a copy of the first so its halo is defined. */
	for (i = 0; i < Nz; i++) {
		for (j = 0; j < Ny; j++) {
			for (k = 0; k < Nx; k++) {
				A[1][i][j][k] = A[0][i][j][k];
			}
		}
	}
	for (m = 0; m < 13; m++) {
		for (i = 0; i < Nz; i++) {
			for (j = 0; j < Ny; j++) {
				for (k = 0; k < Nx; k++) {
					coef[m][i][j][k] = 1.0 * (rand() % BASE);
				}
			}
		}
	}

#ifdef LIKWID_PERFMON
	LIKWID_MARKER_INIT;
#pragma omp parallel
	{
		LIKWID_MARKER_THREADINIT;
#pragma omp barrier
		LIKWID_MARKER_START("calc");
	}
#endif

	int num_threads = 1;
#if defined(_OPENMP)
	num_threads = omp_get_max_threads();
#endif

	for(test=0; test<TESTS; test++){
		gettimeofday(&start, 0);
		// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
		for (t = 0; t < Nt; t++) {
			for (i = 4; i < Nz-4; i++) {
				for (j = 4; j < Ny-4; j++) {
					for (k = 4; k < Nx-4; k++) {
						A[(t+1)%2][i][j][k] =
							coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
							coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
							coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
							coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
							coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
							coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
							coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
							coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
							coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
							coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
							coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
							coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
							coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
					}
				}
			}
		}
#pragma endscop
		gettimeofday(&end, 0);
		ts_return = timeval_subtract(&result, &end, &start);
		tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
		/* BUG FIX: was lowercase `min`, which is not defined anywhere
		 * (only the MIN macro above exists). */
		min_tdiff = MIN(min_tdiff, tdiff);
		printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
	}
	(void) ts_return;

	PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
	{
		LIKWID_MARKER_STOP("calc");
	}
	LIKWID_MARKER_CLOSE;
#endif

	// Free allocated arrays (including the top-level pointers, which leaked)
	for(i=0; i<Nz; i++){
		for(j=0;j<Ny;j++){
			free(A[0][i][j]);
			free(A[1][i][j]);
		}
		free(A[0][i]);
		free(A[1][i]);
	}
	free(A[0]);
	free(A[1]);
	free(A);
	for(m=0; m<13;m++){
		for(i=0; i<Nz; i++){
			for(j=0;j<Ny;j++){
				free(coef[m][i][j]);
			}
			free(coef[m][i]);
		}
		free(coef[m]);
	}
	free(coef);
	free(tile_size);
	return 0;
}
|
NETNTLM_bs_fmt_plug.c | /*
* NETNTLM_fmt.c -- NTLM Challenge/Response
*
* Written by JoMo-Kun <jmk at foofus.net> in 2007
* and placed in the public domain.
*
* Modified for performance, support for Extended Session Security, OMP
* and UTF-8, by magnum 2010-2011.
* Modified for using Bitsliced DES by Deepika Dutta Mishra
* <dipikadutta at gmail.com> in 2013, no rights reserved.
*
* This algorithm is designed for performing brute-force cracking of the NTLM
* (version 1) challenge/response pairs exchanged during network-based
* authentication attempts [1]. The captured challenge/response pairs from these
* attempts should be stored using the L0phtCrack 2.0 LC format, specifically:
* username:unused:unused:lm response:ntlm response:challenge. For example:
*
* CORP\Administrator:::25B2B477CE101D83648BB087CE7A1C217F51C7FC64C0EBB1:
* C8BD0C1630A9ECF7A95F494A8F0B2CB4A3F25B1225514304:1122334455667788
*
* It should be noted that a NTLM authentication response is not same as a NTLM
* password hash, which can be extracted using tools such as FgDump [2]. NTLM
* responses can be gathered via normal network capture or via tools which
* perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can
* also be harvested using a modified Samba service [5] in conjunction with
* some trickery to convince the user to connect to it. I leave what that
* trickery may actually be as an exercise for the reader (HINT: Karma, NMB
* broadcasts, IE, Outlook, social engineering, ...).
*
* [1] http://davenport.sourceforge.net/ntlm.html#theNtlmResponse
* [2] http://www.foofus.net/~fizzgig/fgdump/
* [3] http://ettercap.sourceforge.net/
* [4] http://www.oxid.it/cain.html
* [5] http://www.foofus.net/jmk/smbchallenge.html
*
* This version supports Extended Session Security. This is what
* is used when the "LM" hash ends in 32 zeros:
*
* DOMAIN\User:::c70e4fb229437ef300000000000000000000000000000000:
* abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9:24ca92fdab441aa4
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_NETNTLM_old;
#elif FMT_REGISTERS_H
john_register_one(&fmt_NETNTLM_old);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "DES_std.h"
#include "DES_bs.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "md5.h"
#include "unicode.h"
#include "memdbg.h"
#ifndef uchar
#define uchar unsigned char
#endif
#define FORMAT_LABEL "netntlm-naive"
#define FORMAT_NAME "NTLMv1 C/R"
#define FORMAT_TAG "$NETNTLM$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "MD4 DES (ESS MD5) " DES_BS_ALGORITHM_NAME " naive"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 24
#define BINARY_ALIGN 4
#define PARTIAL_BINARY_SIZE 8
#define SALT_SIZE 8
#define SALT_ALIGN 4
#define CIPHERTEXT_LENGTH 48
#define TOTAL_LENGTH (10 + 2 * 2 * SALT_SIZE + CIPHERTEXT_LENGTH)
#define MIN_KEYS_PER_CRYPT DES_BS_DEPTH
#define MAX_KEYS_PER_CRYPT DES_BS_DEPTH
/* Self-test vectors. Each entry is either a canonical
 * "$NETNTLM$<challenge>$<response>" hash with its plaintext, or a
 * plaintext plus raw login fields {user, -, -, lm response,
 * ntlm response, challenge} for prepare() to assemble. The entry whose
 * "LM" field is 16 hex digits padded with 32 zeros exercises the
 * Extended Session Security (ESS) path. */
static struct fmt_tests tests[] = {
	{"", "FooBarGerg", {"User", "", "", "lm-hash", "35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "1122334455667788"} },
	{"$NETNTLM$1122334455667788$BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "g3rg3g3rg3g3rg3"},
	{"$NETNTLM$1122334455667788$E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "M1xedC4se%^&*@)##(blahblah!@#"},
	{"$NETNTLM$c75c20bff9baa71f4765f360625700b0$81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "password"},
	{"$NETNTLM$1122334455667788$35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "FooBarGerg"},
	{"$NETNTLM$1122334455667788$A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "visit www.foofus.net"},
	{"$NETNTLM$24ca92fdab441aa4c70e4fb229437ef3$abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9", "longpassword"},
	{"$NETNTLM$1122334455667788$B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "cory21"},
	{"", "g3rg3g3rg3g3rg3", {"User", "", "", "lm-hash", "BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "1122334455667788"} },
	{"", "M1xedC4se%^&*@)##(blahblah!@#", {"User", "", "", "lm-hash", "E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "1122334455667788"} },
	{"", "visit www.foofus.net", {"User", "", "", "lm-hash", "A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "1122334455667788"} },
	{"", "password", {"ESS", "", "", "4765f360625700b000000000000000000000000000000000", "81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "c75c20bff9baa71f"} },
	{"", "cory21", {"User", "", "", "lm-hash", "B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "1122334455667788"} },
	{NULL}
};
/* Per-candidate state, allocated in init() for max_keys_per_crypt slots. */
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];  /* candidate passwords */
static int (*saved_len);                           /* candidate lengths */
static uchar (*output)[PARTIAL_BINARY_SIZE];
static uchar (*saved_key)[21]; // NT hash (16 bytes; zero-padded to 21 in cmp_exact)
static uchar *challenge;       /* current salt (IP-permuted challenge, see get_salt) */
static int keys_prepared;      /* 0 => crypt_all must recompute the NT hashes */

/* forward declaration: referenced before its definition below */
static void set_salt(void *salt);
/* One-time format setup: initialize the bitsliced-DES engine and allocate
 * the per-candidate buffers sized to max_keys_per_crypt. */
static void init(struct fmt_main *self)
{
	int nkeys;

	/* LM = 2 selects DES encryption with no salt and no iterations */
	DES_bs_init(2, DES_bs_cpt);
#if DES_bs_mt
	self->params.min_keys_per_crypt = DES_bs_min_kpc;
	self->params.max_keys_per_crypt = DES_bs_max_kpc;
#endif
	nkeys = self->params.max_keys_per_crypt;
	saved_plain = mem_calloc(nkeys, sizeof(*saved_plain));
	saved_len   = mem_calloc(nkeys, sizeof(*saved_len));
	output      = mem_calloc(nkeys, sizeof(*output));
	saved_key   = mem_calloc(nkeys, sizeof(*saved_key));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_plain);
	MEM_FREE(saved_len);
	MEM_FREE(output);
	MEM_FREE(saved_key);
}
/* Accept "$NETNTLM$<challenge hex>$<48 hex response>".
 * Two layouts are valid: 16 hex digits of server challenge (total length
 * 74, '$' separator at offset 25) or 32 hex digits of combined ESS
 * challenge (length 90, '$' at offset 41). Returns 1 if valid, else 0. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *pos;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0;
	if ((strlen(ciphertext) != 74) && (strlen(ciphertext) != 90)) return 0;
	/* reject only when NEITHER admissible separator position holds '$' */
	if ((ciphertext[25] != '$') && (ciphertext[41] != '$')) return 0;
	/* challenge field: hex digits (atoi16 == 0x7F marks a non-hex char) */
	for (pos = &ciphertext[FORMAT_TAG_LEN]; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
	if (*pos != '$') return 0;
	/* response field: hex digits running to end of string */
	for (pos++;atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
	/* must end the string with exactly CIPHERTEXT_LENGTH response digits */
	if (!*pos && ((pos - ciphertext - 26 == CIPHERTEXT_LENGTH) ||
	        (pos - ciphertext - 42 == CIPHERTEXT_LENGTH)))
		return 1;
	else
		return 0;
}
/* Build the canonical "$NETNTLM$<challenge>$<response>" string from raw
 * login fields (split_fields[3] = "LM" response, [4] = NTLM response,
 * [5] = server challenge). Returns split_fields[1] unchanged whenever
 * the fields do not describe a usable NTLMv1 response. */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
	char *cp;
	char clientChal[17];
	char *srv_challenge = split_fields[3];
	char *nethashv2 = split_fields[4];
	char *cli_challenge = split_fields[5];

	/* already in canonical form */
	if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN))
		return split_fields[1];
	if (!srv_challenge || !nethashv2 || !cli_challenge)
		return split_fields[1];
	if (strlen(nethashv2) != CIPHERTEXT_LENGTH)
		return split_fields[1];

	// this string suggests we have an improperly formatted NTLMv2
	if (!strncmp(&nethashv2[32], "0101000000000000", 16))
		return split_fields[1];

	// Handle ESS (8 byte client challenge in "LM" field padded with zeros)
	if (strlen(srv_challenge) == 48 && !strncmp(&srv_challenge[16],
	    "00000000000000000000000000000000", 32)) {
		/* keep the 16 hex digits of the client challenge */
		memcpy(clientChal, srv_challenge,16);
		clientChal[16] = 0;
	}
	else
		clientChal[0] = 0;

	/* tag + server challenge [+ client challenge] + '$' + response + NUL */
	cp = mem_alloc(FORMAT_TAG_LEN+strlen(cli_challenge)+strlen(clientChal)+1+strlen(nethashv2)+1);
	sprintf(cp, "%s%s%s$%s", FORMAT_TAG, cli_challenge, clientChal, nethashv2);

	if (valid(cp,self)) {
		char *cp2 = str_alloc_copy(cp);
		MEM_FREE(cp);
		return cp2;
	}
	MEM_FREE(cp);
	return split_fields[1];
}
/* Canonicalize a hash: lowercase everything after the "$NETNTLM$" tag.
 * Returns a static buffer, as the fmt interface allows. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TOTAL_LENGTH + 1];

	memset(out, 0, sizeof(out));
	strcpy(out, ciphertext);
	strlwr(&out[FORMAT_TAG_LEN]); /* Exclude: $NETNTLM$ */
	return out;
}
/* Repack a 24-byte DES ciphertext (three 8-byte blocks) into the layout
 * the bitsliced comparison routines expect: each block's bytes are
 * serialized MSB-first into two 32-bit words, then the DES initial
 * permutation is applied and the resulting word pair swapped.
 * Returns a pointer to a static 6-word buffer. */
static uint32_t *generate_des_format(uchar* binary)
{
	static uint32_t out[6];
	ARCH_WORD block[6];
	int chr, src,dst,i;
	uchar value, mask;
	ARCH_WORD *ptr;

	memset(block, 0, sizeof(block));

	/* spread each 8-byte block, bit by bit (MSB first), over two words */
	for (chr = 0; chr < 24; chr=chr + 8)
	{
		dst = 0;
		for (i=0; i<8; i++)
		{
			value = binary[chr + i];
			mask = 0x80;

			for (src = 0; src < 8; src++) {
				if (value & mask)
					block[(chr/4) + (dst>>5)] |= 1U << (dst & 0x1F);
				mask >>= 1;
				dst++;
			}
		}
	}

	/* Apply initial permutation on ciphertext blocks */
	for (i=0; i<6; i=i+2)
	{
		ptr = DES_do_IP(&block[i]);
		out[i] = ptr[1];     /* halves are stored swapped */
		out[i+1] = ptr[0];
	}

	return out;
}
static void *get_binary(char *ciphertext)
{
uchar binary[BINARY_SIZE];
int i;
uint32_t *ptr;
ciphertext = strrchr(ciphertext, '$') + 1;
for (i=0; i<BINARY_SIZE; i++) {
binary[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4;
binary[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]);
}
/* Set binary in DES format */
ptr = generate_des_format(binary);
return ptr;
}
/* Expand a 7-byte (56-bit) key chunk into the 8-byte form expected by
 * DES_bs_set_key(): 7 key bits per output byte shifted right once
 * (OpenSSL parity-bit layout), with bit 0x80 forced on in every byte so
 * that no byte is zero (a zero byte would terminate the key early in
 * DES_bs_set_key, per the original author's note below). */
inline static void setup_des_key(unsigned char key_56[], int index)
{
	char key[8];

	/* Right shift key bytes by 1 to bring in openssl format */
	/* Each byte of key is xored with 0x80 to pass check for 0 in DES_bs_set_key() */

	key[0] = (key_56[0] >> 1) | 0x80;
	key[1] = (((key_56[0] << 7) | (key_56[1] >> 1)) >>1) | 0x80;
	key[2] = (((key_56[1] << 6) | (key_56[2] >> 2)) >>1) | 0x80;
	key[3] = (((key_56[2] << 5) | (key_56[3] >> 3)) >>1) | 0x80;
	key[4] = (((key_56[3] << 4) | (key_56[4] >> 4)) >>1) | 0x80;
	key[5] = (((key_56[4] << 3) | (key_56[5] >> 5)) >>1) | 0x80;
	key[6] = (((key_56[5] << 2) | (key_56[6] >> 6)) >>1) | 0x80;
	key[7] = ((key_56[6] << 1) >>1 ) | 0x80;

	DES_bs_set_key((char*)key, index);
}
/* Encrypt the current challenge under the first 7-byte third of every
 * candidate's NT hash (the remaining two DES blocks are verified lazily
 * in cmp_exact). NT hashes are regenerated only when a key changed since
 * the last crypt (keys_prepared flag, cleared by set_key). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i;

	if (!keys_prepared) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (i = 0; i < count; i++) {
			int len;

			/* Generate 16-byte NTLM hash */
			len = E_md4hash((uchar *) saved_plain[i], saved_len[i],
			        saved_key[i]);

			/* non-positive len presumably signals truncation at -len;
			 * mirror it in the stored plaintext */
			if (len <= 0)
				saved_plain[i][-len] = 0; // match truncation

			/* NULL-padding the 16-byte hash to 21-bytes is made
			   in cmp_exact if needed */

			setup_des_key(saved_key[i], i);
		}
		keys_prepared = 1;
	}

	/* Bitsliced des encryption */
	DES_bs_crypt_plain(count);

	return count;
}
/* Any-candidate match against the first partial binary word(s). */
static int cmp_all(void *binary, int count)
{
	uint32_t *bin = (uint32_t *)binary;
	return DES_bs_cmp_all(bin, count);
}
/* Single-candidate match on the first 32 bits of the binary. */
static int cmp_one(void *binary, int index)
{
	uint32_t *bin = (uint32_t *)binary;
	return DES_bs_cmp_one(bin, 32, index);
}
/* Full 24-byte verification for one candidate. The 21-byte key (16-byte
 * NT hash zero-padded by 5) is split into three 7-byte DES keys; block 0
 * is re-checked in full, then blocks 1 (key bytes 7..13) and 2 (bytes
 * 14..20) are computed in bitslice slot 0. Since slot 0 is clobbered by
 * those extra encryptions, key 0's state is re-established before every
 * return so subsequent cmp_* calls stay consistent. */
static int cmp_exact(char *source, int index)
{
	uint32_t *binary = get_binary(source);

	/* DES block 1: full 64-bit check of what crypt_all computed */
	if (!DES_bs_cmp_one(binary, 64, index))
		return 0;

	/* DES block 2: key = NT hash bytes 7..13, computed in slot 0 */
	setup_des_key(&saved_key[index][7], 0);
	DES_bs_crypt_plain(1);
	if (!DES_bs_cmp_one(&binary[2], 64, 0))
	{
		setup_des_key(saved_key[0], 0);
		DES_bs_crypt_plain(1);
		return 0;
	}

	/* NULL-pad 16-byte NTLM hash to 21-bytes (postponed until now) */
	memset(&saved_key[index][16], 0, 5);

	/* DES block 3: key = NT hash bytes 14..20 (mostly padding) */
	setup_des_key(&saved_key[index][14], 0);
	DES_bs_crypt_plain(1);
	if (!DES_bs_cmp_one(&binary[4], 64, 0))
	{
		setup_des_key(saved_key[0], 0);
		DES_bs_crypt_plain(1);
		return 0;
	}
	/* restore slot 0 before reporting the match */
	setup_des_key(saved_key[0], 0);
	DES_bs_crypt_plain(1);
	return 1;
}
/* Decode the challenge into the 8-byte binary salt.
 * Plain layout ('$' at offset 25): the 16 hex digits are the server
 * challenge. ESS layout: the 32 hex digits are server||client challenge,
 * and the effective challenge is the first 8 bytes of MD5(server||client).
 * Finally the DES initial permutation is applied, matching the IP-space
 * data used by DES_bs_generate_plaintext(). Returns a static buffer. */
static void *get_salt(char *ciphertext)
{
	static uchar *binary_salt;
	int i, cnt,j;
	unsigned char temp[SALT_SIZE];

	if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);

	if (ciphertext[25] == '$') {
		// Server challenge
		ciphertext += FORMAT_TAG_LEN;
		for (i = 0; i < SALT_SIZE; ++i)
			binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])];
	} else {
		uchar es_salt[2*SALT_SIZE], k1[2*SALT_SIZE];
		MD5_CTX ctx;

		ciphertext += FORMAT_TAG_LEN;
		// Extended Session Security,
		// Concatenate Server & Client challenges
		for (i = 0;i < 2 * SALT_SIZE; ++i)
			es_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])];

		// MD5 the concatenated challenges, result is our key
		MD5_Init(&ctx);
		MD5_Update(&ctx, es_salt, 16);
		MD5_Final((void*)k1, &ctx);
		memcpy(binary_salt, k1, SALT_SIZE); // but only 8 bytes of it
	}

	/* Apply IP to salt */
	/* NOTE(review): the i ^ 0x20 index appears to undo a half-swap baked
	   into the DES_IP table -- confirm against DES_std.h */
	memset(temp, 0, SALT_SIZE);
	for (i = 0; i < 64; i++) {
		cnt = DES_IP[i ^ 0x20];
		j = (uchar)((binary_salt[cnt >> 3] >> (7 - (cnt & 7))) & 1);
		temp[i/8] |= j << (7 - (i % 8));
	}

	memcpy(binary_salt, temp, SALT_SIZE);
	return (void*)binary_salt;
}
static void set_salt(void *salt)
{
challenge = salt;
DES_bs_generate_plaintext(challenge);
}
/* Store a candidate password and force NT-hash regeneration in crypt_all. */
static void netntlm_set_key(char *key, int index)
{
	keys_prepared = 0;
	saved_len[index] = strnzcpyn(saved_plain[index], key, sizeof(*saved_plain));
}
/* Return the stored candidate password for this slot. */
static char *get_key(int index)
{
	return &saved_plain[index][0];
}
/* Index the salt hash table with the low bits of the first salt word. */
static int salt_hash(void *salt)
{
	const uint32_t *words = (const uint32_t *)salt;
	return (int)(words[0] & (SALT_HASH_SIZE - 1));
}
/* John the Ripper format descriptor: wires the functions above into the
 * generic cracker loop. Field order is dictated by struct fmt_main. */
struct fmt_main fmt_NETNTLM_old = {
	{ /* format parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#if DES_BS
		FMT_BS |
#if DES_bs_mt
		FMT_OMP |
#endif
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
		{ NULL },      /* no tunable costs */
		{ FORMAT_TAG },
		tests
	}, { /* format methods */
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },      /* no tunable cost functions */
		fmt_default_source,
		{ /* binary_hash[]: defaults; comparisons go through DES_bs_cmp_* */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,          /* salt_compare */
		set_salt,
		netntlm_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{ /* get_hash[]: read hashes out of the bitslice buffers */
			DES_bs_get_hash_0,
			DES_bs_get_hash_1,
			DES_bs_get_hash_2,
			DES_bs_get_hash_3,
			DES_bs_get_hash_4,
			DES_bs_get_hash_5,
			DES_bs_get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
omptl_algorithm_par.h | // Copyright (C) 2006-2011 Fokko Beekhof
// Email contact: Fokko.Beekhof@unige.ch
// The OMPTL library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#include <functional>
#include <utility>
#include <cmath>
#include <cstdlib>
#include "omptl_tools.h"
#include "omptl_numeric"
#include <iterator>
namespace omptl
{
/*
 * Serial fallback: adjacent_find carries a dependence between
 * neighbouring elements and is not (yet) parallelized.
 */
template <class ForwardIterator>
ForwardIterator adjacent_find(ForwardIterator first, ForwardIterator last,
			const unsigned P)
{
	(void)P; // thread-count hint unused by the serial fallback
	return std::adjacent_find(first, last);
}
/*
 * Serial fallback (predicate version): not (yet) parallelized due to the
 * dependence between neighbouring elements.
 */
template <class ForwardIterator, class BinaryPredicate>
ForwardIterator adjacent_find(ForwardIterator first, ForwardIterator last,
			BinaryPredicate binary_pred, const unsigned P)
{
	(void)P; // thread-count hint unused by the serial fallback
	return std::adjacent_find(first, last, binary_pred);
}
/*
 * Parallel binary_search: the range (which must be sorted w.r.t. comp, as
 * for std::binary_search) is split into P partitions and each thread
 * searches one; the per-thread hits are OR-reduced.
 */
template <class ForwardIterator, class T, class StrictWeakOrdering>
bool binary_search(ForwardIterator first, ForwardIterator last, const T& value,
		StrictWeakOrdering comp, const unsigned P)
{
	if (detail::_linear_serial_is_faster(first, last, P))
		return std::binary_search(first, last, value, comp);

	std::vector< std::pair<ForwardIterator, ForwardIterator> > parts(P);
	::omptl::detail::_partition_range(first, last, parts, P);

	bool found = false;
	#pragma omp parallel for reduction(|:found)
	for (int t = 0; t < int(P); ++t)
		found |= std::binary_search(parts[t].first, parts[t].second,
					value, comp);

	return found;
}
/*
 * Convenience overload using std::less.
 * BUG FIX: P is now forwarded; it was previously dropped, so the call
 * could not resolve against the five-parameter overload above.
 */
template <class ForwardIterator, class T>
bool binary_search(ForwardIterator first, ForwardIterator last, const T& value,
		const unsigned P)
{
	typedef typename std::iterator_traits<ForwardIterator>::value_type VT;
	return ::omptl::binary_search(first, last, value, std::less<VT>(), P);
}
namespace detail
{
/*
 * Generic case: both iterator categories allow partitioning, so each
 * thread copies one partition. Only the last thread stores its return
 * value (the end of the whole output range) into `result`; the other
 * threads write into a dead local, which keeps the loop body uniform
 * without a critical section.
 */
template <class IteratorInTag, class IteratorOutTag>
struct Copy_
{
	template <class IteratorIn, class IteratorOut>
	static IteratorOut _copy(IteratorIn first, IteratorIn last,
				IteratorOut result, const unsigned P)
	{
		if (detail::_linear_serial_is_faster(first, last, P))
			return std::copy(first, last, result);

		std::vector< std::pair<IteratorIn, IteratorIn> > source_partitions(P);
		::omptl::detail::_partition_range(first, last, source_partitions, P);

		/* dest_partitions[t]: where partition t starts in the output */
		std::vector<IteratorOut> dest_partitions(P);
		::omptl::detail::_copy_partitions(source_partitions, result, dest_partitions, P);

		#pragma omp parallel for
		for (int t = 0; t < int(P); ++t)
		{
			IteratorOut tmp;
			*( (t == int(P-1)) ? &result : &tmp )
				= std::copy( source_partitions[t].first,
					source_partitions[t].second,
					dest_partitions[t]);
		}

		return result;
	}
};
/* Input iterators cannot be partitioned: degrade to the serial copy. */
template <class IteratorOutTag>
struct Copy_< std::input_iterator_tag, IteratorOutTag >
{
	template <class InputIterator, class OutputIterator>
	static OutputIterator _copy(InputIterator first, InputIterator last,
				OutputIterator result, const unsigned P)
	{
		(void)P;
		return std::copy(first, last, result);
	}
};
/* Output iterators cannot be partitioned either: serial copy. */
template <class IteratorInTag>
struct Copy_<IteratorInTag, std::output_iterator_tag>
{
	template <class InputIterator, class OutputIterator>
	static OutputIterator _copy(InputIterator first, InputIterator last,
				OutputIterator result, const unsigned P)
	{
		(void)P;
		return std::copy(first, last, result);
	}
};
} // end namespace detail
template <class InputIterator, class OutputIterator>
OutputIterator copy(InputIterator first, InputIterator last,
OutputIterator result, const unsigned P)
{
return detail::Copy_<
typename std::iterator_traits<InputIterator>::iterator_category,
typename std::iterator_traits<OutputIterator>::iterator_category>
::_copy(first, last, result, P);
}
/*
 * Parallel copy_backward: partitions are copied concurrently, and the
 * last thread's return value is kept as the overall result.
 * NOTE(review): std::copy_backward writes into the range *ending* at the
 * iterator it receives, while _copy_partitions is also used by copy()
 * where the iterators are range *starts* -- verify that dest_partitions
 * holds per-partition output ends here, otherwise every partition lands
 * shifted by its own length.
 */
template <class BidirectionalIterator1, class BidirectionalIterator2>
BidirectionalIterator2 copy_backward(BidirectionalIterator1 first,
				BidirectionalIterator1 last,
				BidirectionalIterator2 result,
				const unsigned P)
{
	if (detail::_linear_serial_is_faster(first, last, P))
		return std::copy_backward(first, last, result);

	std::vector< std::pair<BidirectionalIterator1, BidirectionalIterator1> > source_partitions(P);
	::omptl::detail::_partition_range(first, last, source_partitions, P);

	std::vector<BidirectionalIterator2> dest_partitions(P);
	::omptl::detail::_copy_partitions(source_partitions, result, dest_partitions, P);

	#pragma omp parallel for
	for (int t = 0; t < int(P); ++t)
	{
		BidirectionalIterator2 tmp;
		/* only the last thread's return value survives */
		*( (t == int(P-1)) ? &result : &tmp ) =
			std::copy_backward( source_partitions[t].first,
					source_partitions[t].second,
					dest_partitions[t] );
	}

	return result;
}
namespace detail
{
/* Partition-capable iterators: count each partition in parallel. */
template <class IteratorTag>
struct Count_
{
	template <class Iterator, class EqualityComparable>
	static typename std::iterator_traits<Iterator>::difference_type
	count(Iterator first, Iterator last, const EqualityComparable& value,
		const unsigned P)
	{
		typedef typename std::iterator_traits<Iterator>::difference_type DT;

		if (detail::_linear_serial_is_faster(first, last, P))
			return std::count(first, last, value);

		std::vector< std::pair<Iterator, Iterator> > parts(P);
		::omptl::detail::_partition_range(first, last, parts, P);

		DT total = 0;
		#pragma omp parallel for reduction(+:total)
		for (int t = 0; t < int(P); ++t)
			total += std::count(parts[t].first, parts[t].second, value);

		return total;
	}
};
/* Input iterators cannot be partitioned: serial count. */
template <>
struct Count_< std::input_iterator_tag >
{
	template <class Iterator, class EqualityComparable>
	static typename std::iterator_traits<Iterator>::difference_type
	count(Iterator first, Iterator last, const EqualityComparable& value,
		const unsigned P)
	{
		(void)P;
		return std::count(first, last, value);
	}
};
} // end namespace detail
template <class InputIterator, class EqualityComparable>
typename std::iterator_traits<InputIterator>::difference_type
count(InputIterator first, InputIterator last,
const EqualityComparable& value, const unsigned P)
{
return detail::Count_<typename std::iterator_traits<InputIterator>::iterator_category>::
count(first, last, value, P);
}
/* SGI-style variant: deliver the count through the out-parameter n. */
template <class InputIterator, class EqualityComparable, class Size>
void count(InputIterator first, InputIterator last,
	const EqualityComparable& value, Size& n, const unsigned P)
{
	n = ::omptl::count(first, last, value, P);
}
namespace detail
{
/* Partition-capable iterators: count_if each partition in parallel. */
template <class IteratorTag>
struct Count_if_
{
	template <class Iterator, class Predicate>
	static typename std::iterator_traits<Iterator>::difference_type
	count_if(Iterator first, Iterator last, Predicate pred,
		const unsigned P)
	{
		typedef typename std::iterator_traits<Iterator>::difference_type DT;

		if (detail::_linear_serial_is_faster(first, last, P))
			return std::count_if(first, last, pred);

		std::vector< std::pair<Iterator, Iterator> > parts(P);
		detail::_partition_range(first, last, parts, P);

		DT total = 0;
		#pragma omp parallel for reduction(+:total)
		for (int t = 0; t < int(P); ++t)
			total += std::count_if(parts[t].first,
					parts[t].second, pred);
		return total;
	}
};
/* Input iterators cannot be partitioned: serial count_if. */
template <>
struct Count_if_< std::input_iterator_tag >
{
	template <class InputIterator, class Predicate>
	static typename std::iterator_traits<InputIterator>::difference_type
	count_if(InputIterator first, InputIterator last,
		Predicate pred, const unsigned P)
	{
		(void)P;
		return std::count_if(first, last, pred);
	}
};
} // end namespace detail
template <class InputIterator, class Predicate>
typename std::iterator_traits<InputIterator>::difference_type
count_if(InputIterator first, InputIterator last,
Predicate pred, const unsigned P)
{
return detail::Count_if_<typename
std::iterator_traits<InputIterator>::iterator_category>::
count_if(first, last, pred, P);
}
/* SGI-style variant: deliver the count through the out-parameter n. */
template <class InputIterator, class Predicate, class Size>
void count_if(InputIterator first, InputIterator last,
	Predicate pred, Size& n, const unsigned P)
{
	n = ::omptl::count_if(first, last, pred, P);
}
namespace detail
{
/*
 * Generic case: compare partition-by-partition in parallel. The result
 * is AND-reduced: every partition must compare equal. Bitwise & is used
 * on bool operands (always 0/1), which OpenMP accepts as a reduction op.
 */
template<class Iterator1Tag, class Iterator2Tag>
struct Equal_
{
	template <class Iterator1, class Iterator2, class BinaryPredicate>
	static bool _equal(Iterator1 first1, Iterator1 last1,
			Iterator2 first2, BinaryPredicate binary_pred,
			const unsigned P)
	{
		if (detail::_linear_serial_is_faster(first1, last1, P))
			return std::equal(first1, last1, first2, binary_pred);

		std::vector< std::pair<Iterator1, Iterator1> > source_partitions(P);
		::omptl::detail::_partition_range(first1, last1, source_partitions, P);

		/* start of each corresponding partition in the second range */
		std::vector<Iterator2> dest_partitions(P);
		::omptl::detail::_copy_partitions(source_partitions, first2, dest_partitions, P);

		bool result = true;
		#pragma omp parallel for reduction(&:result)
		for (int t = 0; t < int(P); ++t)
			result &= std::equal( source_partitions[t].first,
					source_partitions[t].second,
					dest_partitions[t], binary_pred);

		return result;
	}
};
/* An input iterator on the first range rules out partitioning: serial. */
template<class Iterator2Tag>
struct Equal_<std::input_iterator_tag, Iterator2Tag>
{
	template <class InputIterator1, class Iterator2, class BinaryPredicate>
	static bool _equal(InputIterator1 first1, InputIterator1 last1,
			Iterator2 first2, BinaryPredicate binary_pred,
			const unsigned P)
	{
		(void)P;
		return std::equal(first1, last1, first2, binary_pred);
	}
};
/* An input iterator on the second range rules out partitioning: serial. */
template<class Iterator1Tag>
struct Equal_<Iterator1Tag, std::input_iterator_tag>
{
	template <class Iterator1, class InputIterator2, class BinaryPredicate>
	static bool _equal(Iterator1 first1, Iterator1 last1,
			InputIterator2 first2, BinaryPredicate binary_pred,
			const unsigned P)
	{
		(void)P;
		return std::equal(first1, last1, first2, binary_pred);
	}
};
/* Both ranges are input iterators: serial (disambiguates the partials). */
template<>
struct Equal_<std::input_iterator_tag, std::input_iterator_tag>
{
	template <class InputIterator1, class InputIterator2, class BinaryPredicate>
	static bool _equal(InputIterator1 first1, InputIterator1 last1,
			InputIterator2 first2, BinaryPredicate binary_pred,
			const unsigned P)
	{
		(void)P;
		return std::equal(first1, last1, first2, binary_pred);
	}
};
} // end namespace detail
template <class InputIterator1, class InputIterator2,
class BinaryPredicate>
bool equal(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, BinaryPredicate binary_pred, const unsigned P)
{
// return std::equal(first1, last1, first2, binary_pred);
return ::omptl::detail::Equal_<
typename std::iterator_traits<InputIterator1>::iterator_category,
typename std::iterator_traits<InputIterator2>::iterator_category>
::_equal(first1, last1, first2, binary_pred, P);
}
/*
 * Convenience overload using std::equal_to.
 * BUG FIX: P is now forwarded; it was previously dropped, which left no
 * viable ::omptl::equal overload for the four remaining arguments.
 */
template <class InputIterator1, class InputIterator2>
bool equal(InputIterator1 first1, InputIterator1 last1,
	InputIterator2 first2, const unsigned P)
{
	typedef typename std::iterator_traits<InputIterator1>::value_type VT;
	return ::omptl::equal(first1, last1, first2, std::equal_to<VT>(), P);
}
//TODO
//TODO: still serial; no parallel equal_range exists yet.
template <class ForwardIterator, class T, class StrictWeakOrdering>
std::pair<ForwardIterator, ForwardIterator>
equal_range(ForwardIterator first, ForwardIterator last, const T& value,
	StrictWeakOrdering comp, const unsigned P)
{
	(void)P; // unused until a parallel version is written
	return std::equal_range(first, last, value, comp);
}
/* Convenience overload using std::less. */
template <class ForwardIterator, class T>
std::pair<ForwardIterator, ForwardIterator>
equal_range(ForwardIterator first, ForwardIterator last, const T& value,
	const unsigned P)
{
	typedef typename std::iterator_traits<ForwardIterator>::value_type VT;
	return ::omptl::equal_range(first, last, value, std::less<VT>(), P);
}
/* Parallel fill: each thread fills one partition of [first, last). */
template <class ForwardIterator, class T>
void fill(ForwardIterator first, ForwardIterator last,
	const T& value, const unsigned P)
{
	assert(P > 0u);
	if (detail::_linear_serial_is_faster(first, last, P))
	{
		std::fill(first, last, value);
		return;
	}
	assert(std::distance(first, last) >= 0);
	assert(2*(int)P <= std::distance(first, last));

	std::vector< std::pair<ForwardIterator, ForwardIterator> > parts(P);
	::omptl::detail::_partition_range(first, last, parts, P);

	#pragma omp parallel for
	for (int t = 0; t < int(P); ++t)
		std::fill(parts[t].first, parts[t].second, value);
}
namespace detail
{

// Parallel fill_n for iterators that can be advanced independently
// (forward iterators or better).
template <class IteratorTag>
struct Fill_n_
{
	template <class Iterator, class Size, class T>
	static Iterator fill_n(Iterator first, Size n, const T& value,
				const unsigned P)
	{
		assert(P > 0u);

		Iterator last = first;
		std::advance(last, n);
		if (detail::_linear_serial_is_faster(first, last, P))
			return std::fill_n(first, n, value);

		// Each of the first P-1 chunks gets ceil(n/P) elements; the
		// last chunk takes the remainder.
		// NOTE(review): for n only slightly above the serial cutoff
		// the remainder n-(P-1)*range could underflow for small n -
		// presumably _linear_serial_is_faster's threshold prevents
		// that case; verify against its definition.
		const Size range = (n / P) + ( (n % P) ? 1 : 0 );
		std::vector<Size> ranges(P);
		std::fill_n(ranges.begin(), P - 1, range);
		ranges[P - 1] = n - (P - 1) * range;

		std::vector<Iterator> partitions(P);
		partitions[0] = first;
		for (unsigned i = 1; i < P; ++i)
		{
			partitions[i] = partitions[i - 1];
			std::advance(partitions[i], range);
		}

		// Only the final thread's return value is the end of the
		// whole filled range; the other threads write into a dummy.
		Iterator result;
		#pragma omp parallel for
		for (int t = 0; t < int(P); ++t)
		{
			Iterator tmp;
			*( (t == int(P-1)) ? &result : &tmp )
				= std::fill_n(partitions[t], ranges[t], value);
		}

		return result;
	}
};

// Pure output iterators cannot be advanced ahead of the writes,
// so the range cannot be partitioned; run serially.
template <>
struct Fill_n_< std::output_iterator_tag >
{
	template <class OutputIterator, class Size, class T>
	static OutputIterator fill_n(OutputIterator first, Size n,
				const T& value, const unsigned P)
	{
		return std::fill_n(first, n, value);
	}
};

} // end namespace detail
// Parallel std::fill_n: assign value to the n elements starting at first.
// Dispatches on the iterator category; output iterators run serially.
template <class OutputIterator, class Size, class T>
OutputIterator fill_n(OutputIterator first, Size n,
			const T& value, const unsigned P)
{
	return ::omptl::detail::Fill_n_<typename std::iterator_traits<OutputIterator>::iterator_category>::
		fill_n(first, n, value, P);
}
namespace detail {

// Parallel find for partitionable (forward or better) iterators.
template <class IteratorTag>
struct Find_
{
	template <class Iterator, class EqualityComparable>
	static Iterator find(Iterator first, Iterator last,
				const EqualityComparable& value,
				const unsigned P)
	{
		if (detail::_linear_serial_is_faster(first, last, P))
			return std::find(first, last, value);

		std::vector< std::pair<Iterator, Iterator> > partitions(P);
		::omptl::detail::_partition_range(first, last, partitions, P);

		std::vector<Iterator> results(P);
		#pragma omp parallel for
		for (int t = 0; t < int(P); ++t)
		{
			results[t] = std::find(partitions[t].first, partitions[t].second, value);

			// Normalize "not found in this partition" to the
			// global last so the reduction below is uniform.
			if (results[t] == partitions[t].second)
				results[t] = last;
		}

		// The first partition with a hit holds the overall first match.
		typename std::vector<Iterator>::iterator result =
			std::find_if(results.begin(),results.end(),
				std::bind2nd(std::not_equal_to<Iterator>(), last) );

		if ( result != results.end() )
			return *result;

		return last;
	}
};

// Input iterators cannot be partitioned (single pass); run serially.
template <>
struct Find_< std::input_iterator_tag >
{
	template<class InputIterator, class EqualityComparable>
	static InputIterator find(InputIterator first, InputIterator last,
				const EqualityComparable& value, const unsigned P)
	{
		return std::find(first, last, value);
	}
};

} // end namespace detail
// Parallel std::find: first iterator in [first, last) equal to value,
// or last if no such element exists. Dispatches on the iterator category.
template<class InputIterator, class EqualityComparable>
InputIterator find(InputIterator first, InputIterator last,
		const EqualityComparable& value, const unsigned P)
{
	return ::omptl::detail::Find_< typename std::iterator_traits<InputIterator>::iterator_category >::
		find(first, last, value, P);
}
namespace detail
{

// Parallel find_if for partitionable (forward or better) iterators.
template <class IteratorTag>
struct Find_if_
{
	// BUG FIX: the former signature carried a trailing IteratorTag value
	// parameter that the public dispatcher never supplied, so the call
	// Find_if_<Tag>::find_if(first, last, pred, P) could not compile.
	template <class Iterator, class Predicate>
	static Iterator find_if(Iterator first, Iterator last, Predicate pred,
				const unsigned P)
	{
		if (detail::_linear_serial_is_faster(first, last, P))
			return std::find_if(first, last, pred);

		std::vector< std::pair<Iterator, Iterator> > partitions(P);
		::omptl::detail::_partition_range(first, last, partitions, P);

		std::vector<Iterator> results(P);
		#pragma omp parallel for
		for (int t = 0; t < int(P); ++t)
		{
			results[t] = std::find_if(partitions[t].first, partitions[t].second, pred);

			// Normalize "no match in this partition" to the
			// global last so the reduction below is uniform.
			if (results[t] == partitions[t].second)
				results[t] = last;
		}

		// The first partition with a hit holds the overall first match.
		const typename std::vector<Iterator>::iterator result
			= std::find_if(results.begin(), results.end(),
				std::bind2nd(std::not_equal_to<Iterator>(), last) );

		if ( result != results.end() )
			return *result;

		return last;
	}
};

// Input iterators cannot be partitioned (single pass); run serially.
template <>
struct Find_if_< std::input_iterator_tag >
{
	// BUG FIX: renamed from _find_if so the dispatcher's call to
	// Find_if_<...>::find_if also resolves for input iterators.
	template <class InputIterator, class Predicate>
	static InputIterator find_if(InputIterator first, InputIterator last,
				Predicate pred, const unsigned P)
	{
		return std::find_if(first, last, pred);
	}
};

} // end namespace detail
// Parallel std::find_if: first iterator in [first, last) satisfying pred,
// or last if none does. Dispatches on the iterator category.
template<class InputIterator, class Predicate>
InputIterator find_if(InputIterator first, InputIterator last,
			Predicate pred, const unsigned P)
{
	return ::omptl::detail::Find_if_<typename std::iterator_traits<InputIterator>::iterator_category>::
		find_if(first, last, pred, P);
}
// TODO - not yet parallelized; naive partitioning would cut candidate
// subsequences at chunk boundaries.
// Finds the last occurrence of the subsequence [first2, last2) within
// [first1, last1), comparing elements with comp.
template <class ForwardIterator1, class ForwardIterator2,
	class BinaryPredicate>
ForwardIterator1 find_end(ForwardIterator1 first1, ForwardIterator1 last1,
			ForwardIterator2 first2, ForwardIterator2 last2,
			BinaryPredicate comp, const unsigned P)
{
	return std::find_end(first1, last1, first2, last2, comp);
}

// Overload using operator== for the element comparison.
template <class ForwardIterator1, class ForwardIterator2>
ForwardIterator1 find_end(ForwardIterator1 first1, ForwardIterator1 last1,
			ForwardIterator2 first2, ForwardIterator2 last2,
			const unsigned P)
{
// 	typedef typename std::iterator_traits<ForwardIterator1>::value_type VT;
// 	return ::omptl::find_end(first1, last1, first2, last2, std::less<VT>());
	return std::find_end(first1, last1, first2, last2);
}
namespace detail
{

// find_first_of suffers from a loss of efficiency, and potentially a loss of
// performance when executed in parallel!
// Parallel find_first_of for partitionable (forward or better) iterators:
// first element of [first1, last1) that matches any element of
// [first2, last2) under comp.
template <class IteratorTag>
struct Find_first_of_
{
	template <class Iterator, class ForwardIterator, class BinaryPredicate>
	static Iterator
	find_first_of(Iterator first1, Iterator last1,
			ForwardIterator first2, ForwardIterator last2,
			BinaryPredicate comp, const unsigned P)
	{
		if (detail::_linear_serial_is_faster(first1, last1, P))
			return std::find_first_of(first1, last1,
						first2, last2, comp);

		std::vector< std::pair<Iterator, Iterator> > partitions(P);
		::omptl::detail::_partition_range(first1, last1, partitions, P);

		std::vector<Iterator> results(P);
		#pragma omp parallel for
		for (int t = 0; t < int(P); ++t)
		{
			results[t] = std::find_first_of(partitions[t].first,
							partitions[t].second,
							first2, last2, comp);

			// Normalize "no match in this partition" to the
			// global last1 so the reduction below is uniform.
			if (results[t] == partitions[t].second)
				results[t] = last1;
		}

		// The first partition with a hit holds the overall first match.
		const typename std::vector<Iterator>::iterator
		result = std::find_if(results.begin(), results.end(),
			std::bind2nd(std::not_equal_to<Iterator>(), last1));

		if ( result != results.end() )
			return *result;

		return last1;
	}
};

// Input iterators cannot be partitioned (single pass); run serially.
template <>
struct Find_first_of_< std::input_iterator_tag >
{
	template <class InputIterator, class ForwardIterator,
		class BinaryPredicate>
	static InputIterator
	find_first_of( InputIterator first1, InputIterator last1,
			ForwardIterator first2, ForwardIterator last2,
			BinaryPredicate comp, const unsigned P)
	{
		return std::find_first_of(first1, last1, first2, last2, comp);
	}
};

} // end namespace detail
// Parallel std::find_first_of: first element of [first1, last1) matching
// any element of [first2, last2) under comp. Dispatches on the haystack's
// iterator category.
template <class InputIterator, class ForwardIterator, class BinaryPredicate>
InputIterator find_first_of(InputIterator first1, InputIterator last1,
			ForwardIterator first2, ForwardIterator last2,
			BinaryPredicate comp, const unsigned P)
{
	return ::omptl::detail::Find_first_of_<typename std::iterator_traits<InputIterator>::iterator_category>::
		find_first_of(first1, last1, first2, last2, comp, P);
}
// Convenience overload: match elements with operator== (std::equal_to).
template <class InputIterator, class ForwardIterator>
InputIterator find_first_of(InputIterator first1, InputIterator last1,
			ForwardIterator first2, ForwardIterator last2,
			const unsigned P)
{
	typedef typename std::iterator_traits<InputIterator>::value_type VT;
	// BUG FIX: P was previously dropped from the forwarded call, leaving
	// no viable ::omptl::find_first_of overload for the 5-argument call.
	return ::omptl::find_first_of(first1, last1, first2, last2,
					std::equal_to<VT>(), P);
}
namespace detail
{

// Parallel for_each for partitionable (forward or better) iterators.
template <class IteratorTag>
struct For_each_
{
	template <class Iterator, class UnaryFunction>
	static UnaryFunction for_each(Iterator first, Iterator last,
				UnaryFunction f, const unsigned P)
	{
		if (detail::_linear_serial_is_faster(first, last, P))
			return std::for_each(first, last, f);

		std::vector< std::pair<Iterator, Iterator> > partitions(P);
		::omptl::detail::_partition_range(first, last, partitions, P);

		// Each thread applies its own *copy* of f; any state f
		// accumulates in the parallel path is discarded, and the
		// original f is returned unchanged.
		#pragma omp parallel for
		for (int t = 0; t < int(P); ++t)
			std::for_each(partitions[t].first, partitions[t].second, f);

		return f;
	}
};

// Input iterators cannot be partitioned (single pass); run serially.
template <>
struct For_each_< std::input_iterator_tag >
{
	template <class InputIterator, class UnaryFunction>
	static UnaryFunction for_each(InputIterator first, InputIterator last, UnaryFunction f, const unsigned P)
	{
		return std::for_each(first, last, f);
	}
};

} // end namespace detail
// Parallel std::for_each: apply f to every element of [first, last).
// Dispatches on the iterator category; see detail::For_each_ for the
// caveat about stateful function objects in the parallel path.
template <class InputIterator, class UnaryFunction>
UnaryFunction for_each(InputIterator first, InputIterator last, UnaryFunction f, const unsigned P)
{
	return ::omptl::detail::For_each_<typename std::iterator_traits<InputIterator>::iterator_category>::
		for_each(first, last, f, P);
}
// Serial generate: a generator is typically stateful, so parallel calls
// would race on it; callers who know their generator is safe can use
// par_generate below.
template <class ForwardIterator, class Generator>
void generate(ForwardIterator first, ForwardIterator last, Generator gen)
{
	std::generate(first, last, gen);
}

// Parallel generate: each thread invokes its own *copy* of gen on its own
// chunk. Only meaningful when copies of gen produce acceptable values
// independently (e.g. stateless or per-thread-seeded generators).
template <class ForwardIterator, class Generator>
void par_generate(ForwardIterator first, ForwardIterator last,
		Generator gen, const unsigned P)
{
	if (detail::_linear_serial_is_faster(first, last, P))
	{
		std::generate(first, last, gen);
		return;
	}

	std::vector< std::pair<ForwardIterator, ForwardIterator> > partitions(P);
	::omptl::detail::_partition_range(first, last, partitions, P);

	#pragma omp parallel for
	for (int t = 0; t < int(P); ++t)
		std::generate(partitions[t].first, partitions[t].second, gen);
}
// Heap operations are serial pass-throughs: they are dominated by
// dependent sift-up/sift-down chains and are not parallelized here.
// P is accepted for interface uniformity only.

// Push the element at last-1 onto the heap [first, last-1), using comp.
template <class RandomAccessIterator, class StrictWeakOrdering>
void push_heap(RandomAccessIterator first, RandomAccessIterator last,
		StrictWeakOrdering comp, const unsigned P)
{
	return std::push_heap(first, last, comp);
}

// Overload using operator< as the ordering.
template <class RandomAccessIterator>
void push_heap(RandomAccessIterator first, RandomAccessIterator last,
		const unsigned P)
{
// 	std::less<typename
// 		std::iterator_traits<RandomAccessIterator>::value_type>(),
	return std::push_heap(first, last);
}

// Move the largest element of the heap [first, last) to last-1.
template <class RandomAccessIterator, class StrictWeakOrdering>
inline void pop_heap(RandomAccessIterator first, RandomAccessIterator last,
		StrictWeakOrdering comp, const unsigned P)
{
	return std::pop_heap(first, last, comp);
}

// Overload using operator< as the ordering.
template <class RandomAccessIterator>
inline void pop_heap(RandomAccessIterator first, RandomAccessIterator last, const unsigned P)
{
// 	std::less<typename
// 		std::iterator_traits<RandomAccessIterator>::value_type>
	return std::pop_heap(first, last);
}

// Rearrange [first, last) into a heap ordered by comp.
template <class RandomAccessIterator, class StrictWeakOrdering>
void make_heap(RandomAccessIterator first, RandomAccessIterator last,
		StrictWeakOrdering comp, const unsigned P)
{
	return std::make_heap(first, last, comp);
}

// Overload using operator< as the ordering.
template <class RandomAccessIterator>
void make_heap(RandomAccessIterator first, RandomAccessIterator last, const unsigned P)
{
// 	std::less<typename
// 		std::iterator_traits<RandomAccessIterator>::value_type>(),
	return std::make_heap(first, last);
}

// Turn the heap [first, last) into a sorted range ordered by comp.
template <class RandomAccessIterator, class StrictWeakOrdering>
void sort_heap(RandomAccessIterator first, RandomAccessIterator last,
		StrictWeakOrdering comp, const unsigned P)
{
	return std::sort_heap(first, last, comp);
}

// Overload using operator< as the ordering.
template <class RandomAccessIterator>
void sort_heap(RandomAccessIterator first, RandomAccessIterator last, const unsigned P)
{
// 	std::less<typename
// 		std::iterator_traits<RandomAccessIterator>::value_type>
	return std::sort_heap(first, last);
}
namespace detail
{

// Parallel includes: true iff every element of the sorted range
// [first2, last2) is contained in the sorted range [first1, last1).
template <class Iterator1Tag, class Iterator2Tag>
struct Includes_
{
	template <class Iterator1, class Iterator2, class StrictWeakOrdering>
	static bool includes(Iterator1 first1, Iterator1 last1,
			Iterator2 first2, Iterator2 last2,
			StrictWeakOrdering comp, const unsigned P)
	{
		if (detail::_linear_serial_is_faster(first2, last2, P))
			return std::includes(first1, last1, first2, last2, comp);

		/*
		 * Includes is parallelized by splitting the second range
		 * (needles), rather than the first (the haystack).
		 */
		std::vector< std::pair<Iterator2, Iterator2> >partitions(P);
		::omptl::detail::_partition_range(first2, last2, partitions, P);

		bool result = true;

		// Hence, all needles should be found in the haystack.
		// NOTE(review): the reduction uses bitwise & on a bool
		// accumulator - presumably intended as a logical-and
		// reduction; verify the OpenMP implementation accepts it.
		#pragma omp parallel for reduction(&:result)
		for (int t = 0; t < int(P); ++t)
			result &= std::includes(first1, last1,
						partitions[t].first,
						partitions[t].second, comp);

		return result;
	}
};

// If the needle range is single-pass it cannot be partitioned; serial.
template <class Iterator2Tag>
struct Includes_< std::input_iterator_tag, Iterator2Tag >
{
	template <class InputIterator1, class Iterator2, class StrictWeakOrdering>
	static bool includes(InputIterator1 first1, InputIterator1 last1,
			Iterator2 first2, Iterator2 last2,
			StrictWeakOrdering comp, const unsigned P)
	{
		return std::includes(first1, last1, first2, last2, comp);
	}
};

// If the haystack is single-pass it cannot be re-scanned per thread; serial.
template <class Iterator1Tag>
struct Includes_<Iterator1Tag, std::input_iterator_tag>
{
	template <class Iterator1, class InputIterator2, class StrictWeakOrdering>
	static bool includes(Iterator1 first1, Iterator1 last1,
			InputIterator2 first2, InputIterator2 last2,
			StrictWeakOrdering comp, const unsigned P)
	{
		return std::includes(first1, last1, first2, last2, comp);
	}
};

// Both ranges single-pass: serial.
template <>
struct Includes_< std::input_iterator_tag, std::input_iterator_tag >
{
	template <class InputIterator1, class InputIterator2, class StrictWeakOrdering>
	static bool includes(InputIterator1 first1, InputIterator1 last1,
			InputIterator2 first2, InputIterator2 last2,
			StrictWeakOrdering comp, const unsigned P)
	{
		return std::includes(first1, last1, first2, last2, comp);
	}
};

} // end namespace detail
// Parallel std::includes: true iff every element of the sorted range
// [first2, last2) appears in the sorted range [first1, last1), under comp.
// Dispatches on both iterator categories.
template <class InputIterator1, class InputIterator2, class StrictWeakOrdering>
bool includes(InputIterator1 first1, InputIterator1 last1,
		InputIterator2 first2, InputIterator2 last2,
		StrictWeakOrdering comp, const unsigned P)
{
	typedef typename std::iterator_traits<InputIterator1>::iterator_category IC1;
	typedef typename std::iterator_traits<InputIterator2>::iterator_category IC2;
	return ::omptl::detail::Includes_<IC1, IC2>::includes(first1, last1, first2, last2, comp, P);
}
// Convenience overload: use operator< (std::less) as the ordering.
template <class InputIterator1, class InputIterator2>
bool includes(InputIterator1 first1, InputIterator1 last1,
		InputIterator2 first2, InputIterator2 last2,
		const unsigned P)
{
	typedef typename std::iterator_traits<InputIterator1>::value_type VT;
	// BUG FIX: P was previously dropped from the forwarded call, leaving
	// no viable ::omptl::includes overload for the 5-argument call.
	return ::omptl::includes(first1, last1, first2, last2,
				std::less<VT>(), P);
}
// Not parallelized: the result depends on the first mismatching position,
// which is inherently sequential to determine cheaply.
// True iff [first1, last1) is lexicographically less than [first2, last2)
// under comp.
template <class InputIterator1, class InputIterator2, class BinaryPredicate>
bool lexicographical_compare(InputIterator1 first1, InputIterator1 last1,
			InputIterator2 first2, InputIterator2 last2,
			BinaryPredicate comp, const unsigned P)
{
	return std::lexicographical_compare(first1, last1, first2, last2, comp);
}

// Overload using operator< as the element ordering.
template <class InputIterator1, class InputIterator2>
bool lexicographical_compare(InputIterator1 first1, InputIterator1 last1,
			InputIterator2 first2, InputIterator2 last2,
			const unsigned P)
{
// 	std::less<typename
// 		std::iterator_traits<InputIterator1>::value_type>
	return std::lexicographical_compare(first1, last1, first2, last2);
}
// Parallel std::lower_bound: first position in the sorted range
// [first, last) where value could be inserted without violating comp.
template <class ForwardIterator, class T, class StrictWeakOrdering>
ForwardIterator lower_bound(ForwardIterator first, ForwardIterator last,
			const T& value, StrictWeakOrdering comp,
			const unsigned P)
{
	if (detail::_logn_serial_is_faster(first, last, P))
		return std::lower_bound(first, last, value, comp);

	std::vector< std::pair<ForwardIterator, ForwardIterator> > partitions(P);
	::omptl::detail::_partition_range(first, last, partitions, P);

	std::vector<ForwardIterator> results(P);
	#pragma omp parallel for
	for (int t = 0; t < int(P); ++t)
		results[t] = std::lower_bound(partitions[t].first, partitions[t].second, value, comp);

	// BUG FIX: the answer lives in the first partition whose local search
	// did not run off its *own* end. The previous code compared each
	// result against the global `last`, which a non-final partition's
	// lower_bound can never return, so results[0] was always chosen.
	for (unsigned t = 0; t < P; ++t)
		if (results[t] != partitions[t].second)
			return results[t];

	// value is greater than every element: insertion point is last.
	return last;
}
// Convenience overload: use operator< (std::less on the value's type)
// as the ordering.
template <class ForwardIterator, class T>
ForwardIterator lower_bound(ForwardIterator first, ForwardIterator last,
			const T& value, const unsigned P)
{
	return ::omptl::lower_bound(first, last, value, std::less<T>(), P);
}
// Not parallelized, dependencies between data.
// Merge the two sorted ranges [first1, last1) and [first2, last2) into
// the range starting at result, ordered by comp; returns the end of the
// merged output.
template <class InputIterator1, class InputIterator2, class OutputIterator,
	class StrictWeakOrdering>
OutputIterator merge(InputIterator1 first1, InputIterator1 last1,
		InputIterator2 first2, InputIterator2 last2,
		OutputIterator result,
		StrictWeakOrdering comp, const unsigned P)
{
	return std::merge(first1, last1, first2, last2, result, comp);
}

// Overload using operator< as the ordering.
template <class InputIterator1, class InputIterator2, class OutputIterator>
OutputIterator merge(InputIterator1 first1, InputIterator1 last1,
		InputIterator2 first2, InputIterator2 last2,
		OutputIterator result, const unsigned P)
{
// 	std::less<typename
// 		std::iterator_traits<InputIterator1>::value_type>
	return std::merge(first1, last1, first2, last2, result);
}
// Parallel std::min_element: iterator to a smallest element of
// [first, last) under comp, or last for an empty range (serial path).
template <class ForwardIterator, class BinaryPredicate>
ForwardIterator min_element(ForwardIterator first, ForwardIterator last,
			BinaryPredicate comp, const unsigned P)
{
	if (detail::_linear_serial_is_faster(first, last, P))
		return std::min_element(first, last, comp);

	std::vector< std::pair<ForwardIterator, ForwardIterator> > partitions(P);
	::omptl::detail::_partition_range(first, last, partitions, P);

	// Per-partition minima, reduced serially below.
	std::vector<ForwardIterator> results(P);
	#pragma omp parallel for
	for (int t = 0; t < int(P); ++t)
		results[t] = std::min_element(partitions[t].first, partitions[t].second, comp);

	ForwardIterator result = results[0];
	for (unsigned i = 1; i < P; ++i)
		if ( (result != last) && (results[i] != last) && comp(*results[i], *result) )
			result = results[i];

	return result;
}

// Convenience overload: use operator< (std::less) as the ordering.
template <class ForwardIterator>
ForwardIterator min_element(ForwardIterator first, ForwardIterator last,
			const unsigned P)
{
	typedef typename std::iterator_traits<ForwardIterator>::value_type value_type;
	return ::omptl::min_element(first, last, std::less<value_type>(), P);
}
// Parallel std::max_element: iterator to a largest element of
// [first, last) under comp, or last for an empty range (serial path).
template <class ForwardIterator, class BinaryPredicate>
ForwardIterator max_element(ForwardIterator first, ForwardIterator last,
			BinaryPredicate comp, const unsigned P)
{
	if (detail::_linear_serial_is_faster(first, last, P))
		return std::max_element(first, last, comp);

	std::vector< std::pair<ForwardIterator, ForwardIterator> > partitions(P);
	::omptl::detail::_partition_range(first, last, partitions, P);

	// Per-partition maxima, reduced serially below.
	std::vector<ForwardIterator> results(P);
	#pragma omp parallel for
	for (int t = 0; t < int(P); ++t)
		results[t] = std::max_element(partitions[t].first, partitions[t].second, comp);

	ForwardIterator result = results[0];
	for (unsigned i = 1; i < P; ++i)
	{
		if ( (result != last) && (results[i] != last) && comp(*result, *results[i]) )
			result = results[i];
	}

	return result;
}

// Convenience overload: use operator< (std::less) as the ordering.
template <class ForwardIterator>
ForwardIterator max_element(ForwardIterator first, ForwardIterator last,
			const unsigned P)
{
	typedef typename std::iterator_traits<ForwardIterator>::value_type value_type;
	return ::omptl::max_element(first, last, std::less<value_type>(), P);
}
namespace detail
{

// Parallel mismatch: first position where the two ranges differ under
// binary_pred, as a pair of iterators into each range.
template <class Iterator1Tag, class Iterator2Tag>
struct Mismatch_
{
	template <class Iterator1, class Iterator2, class BinaryPredicate>
	static std::pair<Iterator1, Iterator2>
	mismatch(Iterator1 first1, Iterator1 last1, Iterator2 first2,
		BinaryPredicate binary_pred, const unsigned P)
	{
		if (detail::_linear_serial_is_faster(first1, last1, P))
			return std::mismatch(first1, last1, first2, binary_pred);

		std::vector< std::pair<Iterator1, Iterator1> > source_partitions(P);
		::omptl::detail::_partition_range(first1, last1, source_partitions, P);

		// Starting points in the second range, aligned with the
		// source partitions (computed by _copy_partitions, defined
		// earlier in the file).
		std::vector<Iterator2> dest_partitions(P);
		::omptl::detail::_copy_partitions(source_partitions, first2, dest_partitions, P);

		std::vector< std::pair<Iterator1, Iterator2> > results(P);
		#pragma omp parallel for
		for (int t = 0; t < int(P); ++t)
			results[t] = std::mismatch(source_partitions[t].first,
						source_partitions[t].second,
						dest_partitions[t], binary_pred);

		// This could have been done more elegantly with select1st.
		// The first partition that stopped before its own end holds
		// the overall first mismatch; otherwise return the last
		// partition's result (both ends if the ranges are equal).
		for (unsigned i = 0; i < P - 1; ++i)
			if (results[i].first != source_partitions[i].second)
				return results[i];

		return results[P - 1];
	}
};

// If the second range is single-pass it cannot be partitioned; serial.
template <class Iterator1Tag>
struct Mismatch_<Iterator1Tag, std::input_iterator_tag >
{
	template <class InputIterator1, class InputIterator2,
		class BinaryPredicate>
	static std::pair<InputIterator1, InputIterator2>
	mismatch(InputIterator1 first1, InputIterator1 last1,
		InputIterator2 first2, BinaryPredicate binary_pred,
		const unsigned P)
	{
		return std::mismatch(first1, last1, first2, binary_pred);
	}
};

// If the first range is single-pass it cannot be partitioned; serial.
template <class Iterator2Tag>
struct Mismatch_< std::input_iterator_tag, Iterator2Tag >
{
	template <class InputIterator1, class InputIterator2,
		class BinaryPredicate>
	static std::pair<InputIterator1, InputIterator2>
	mismatch(InputIterator1 first1, InputIterator1 last1,
		InputIterator2 first2, BinaryPredicate binary_pred,
		const unsigned P)
	{
		return std::mismatch(first1, last1, first2, binary_pred);
	}
};

// Both ranges single-pass: serial.
template <>
struct Mismatch_< std::input_iterator_tag, std::input_iterator_tag >
{
	template <class InputIterator1, class InputIterator2,
		class BinaryPredicate>
	static std::pair<InputIterator1, InputIterator2>
	mismatch(InputIterator1 first1, InputIterator1 last1,
		InputIterator2 first2, BinaryPredicate binary_pred,
		const unsigned P)
	{
		return std::mismatch(first1, last1, first2, binary_pred);
	}
};

} // end namespace detail
// Parallel std::mismatch: first position where [first1, last1) and the
// range starting at first2 differ under binary_pred. Dispatches on both
// iterator categories.
template <class InputIterator1, class InputIterator2, class BinaryPredicate>
std::pair<InputIterator1, InputIterator2>
mismatch(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2,
	BinaryPredicate binary_pred, const unsigned P)
{
	return ::omptl::detail::Mismatch_<
		typename std::iterator_traits<InputIterator1>::iterator_category,
		typename std::iterator_traits<InputIterator2>::iterator_category>::
			mismatch(first1, last1, first2, binary_pred, P);
}

// Convenience overload: compare elements with operator== (std::equal_to).
template <class InputIterator1, class InputIterator2>
std::pair<InputIterator1, InputIterator2>
mismatch(InputIterator1 first1, InputIterator1 last1,
	InputIterator2 first2, const unsigned P)
{
	typedef typename std::iterator_traits<InputIterator1>::value_type VT;
	return ::omptl::mismatch(first1, last1, first2,std::equal_to<VT>(),P);
}
// TODO How can this be parallelized ?
// Rearranges [first, last) so that *nth is the element that would be there
// after a full sort under comp, with everything before it no greater.
template <class RandomAccessIterator, class StrictWeakOrdering>
void nth_element(RandomAccessIterator first, RandomAccessIterator nth,
		RandomAccessIterator last,
		StrictWeakOrdering comp, const unsigned P)
{
	std::nth_element(first, nth, last, comp);
}

// Overload using operator< as the ordering.
template <class RandomAccessIterator>
void nth_element(RandomAccessIterator first, RandomAccessIterator nth,
		RandomAccessIterator last, const unsigned P)
{
// 	typedef typename
// 		std::iterator_traits<RandomAccessIterator>::value_type
// 	std::less<VT>
	std::nth_element(first, nth, last);
}
namespace detail
{

// In-place partition of [first, last) around the given pivot value:
// elements for which comp(elem, pivot) holds are moved to the front.
// Returns an iterator to the start of the "not less than pivot" tail.
// NOTE(review): takes the pivot *by value* (a copy), so it stays valid
// while elements are swapped around; requires random-access iterators
// (uses operator<).
template<typename Iterator, class StrictWeakOrdering>
Iterator _pivot_range(Iterator first, Iterator last,
	const typename std::iterator_traits<Iterator>::value_type pivot,
	StrictWeakOrdering comp)
{
	while (first < last)
	{
		if (comp(*first, pivot))
			++first;
		else
		{
			// Scan backwards for an element belonging in the
			// front, then swap it with *first.
			while ( (first < --last) && !comp(*last, pivot) )
				/* nop */;
			std::iter_swap(first, last);
		}
	}
	return last;
}

} // end namespace detail
// Parallel partial_sort: after the call, [first, middle) contains the
// smallest middle-first elements of [first, last), sorted under comp.
// Large inputs are first split around *middle, then only the lower part
// is sorted (in parallel, via omptl::sort).
template <class RandomAccessIterator, class StrictWeakOrdering>
void partial_sort(RandomAccessIterator first,
		RandomAccessIterator middle,
		RandomAccessIterator last,
		StrictWeakOrdering comp, const unsigned P)
{
	const typename std::iterator_traits<RandomAccessIterator>::difference_type
		N = std::distance(first, last);
	assert(N >= 0);
	if (2*P < unsigned(N))
	{
		// NOTE(review): pivoting on the *value* at middle does not
		// necessarily leave exactly middle-first elements in the
		// front part - presumably acceptable for this use; verify
		// behavior on inputs with many duplicates.
		::omptl::detail::_pivot_range(first, last, *middle, comp);
		::omptl::sort(first, middle, comp, P);
	}
	else
		// BUG FIX: arguments were previously passed as
		// (first, last, middle), violating std::partial_sort's
		// required (first, middle, last) order.
		std::partial_sort(first, middle, last, comp);
}
// Convenience overload: use operator< (std::less) as the ordering.
template <class RandomAccessIterator>
void partial_sort(RandomAccessIterator first, RandomAccessIterator middle,
		RandomAccessIterator last, const unsigned P)
{
	typedef typename std::iterator_traits<RandomAccessIterator>::value_type VT;
	::omptl::partial_sort(first, middle, last, std::less<VT>(), P);
}
// Not parallelized due to dependencies.
// Copies the smallest elements of [first, last) into
// [result_first, result_last), sorted under comp; returns the end of the
// written output.
template <class InputIterator, class RandomAccessIterator,
	class StrictWeakOrdering>
RandomAccessIterator
partial_sort_copy(InputIterator first, InputIterator last,
		RandomAccessIterator result_first,
		RandomAccessIterator result_last, StrictWeakOrdering comp,
		const unsigned P)
{
	return std::partial_sort_copy(first, last, result_first, result_last, comp);
}

// Not parallelized due to dependencies.
// Overload using operator< as the ordering.
template <class InputIterator, class RandomAccessIterator>
RandomAccessIterator
partial_sort_copy(InputIterator first, InputIterator last,
		RandomAccessIterator result_first,
		RandomAccessIterator result_last, const unsigned P)
{
// 	std::less<typename
// 		std::iterator_traits<InputIterator>::value_type>(),
	return std::partial_sort_copy(first, last, result_first, result_last);
}
// Not (yet) parallelized, not straightforward due to possible dependencies
// between subtasks.
// Moves elements satisfying pred before those that do not; returns the
// partition point.
template <class ForwardIterator, class Predicate>
ForwardIterator partition(ForwardIterator first, ForwardIterator last,
			Predicate pred, const unsigned P)
{
	return std::partition(first, last, pred);
}

// Not (yet) parallelized, not straightforward due to possible dependencies
// between subtasks.
// As partition, but preserves the relative order within each group.
template <class ForwardIterator, class Predicate>
ForwardIterator stable_partition(ForwardIterator first, ForwardIterator last,
				Predicate pred, const unsigned P)
{
	return std::stable_partition(first, last, pred);
}
// Permutation generation is a serial pass-through; P is accepted for
// interface uniformity only.

// Advance [first, last) to the next lexicographic permutation under comp;
// false when the range wraps back to the smallest permutation.
template <class BidirectionalIterator, class StrictWeakOrdering>
bool next_permutation(BidirectionalIterator first, BidirectionalIterator last,
			StrictWeakOrdering comp, const unsigned P)
{
	return std::next_permutation(first, last, comp);
}

// Overload using operator< as the ordering.
template <class BidirectionalIterator>
bool next_permutation(BidirectionalIterator first, BidirectionalIterator last, const unsigned P)
{
// 	std::less<typename
// 		std::iterator_traits<BidirectionalIterator>::value_type>
	return std::next_permutation(first, last);
}

// Retreat [first, last) to the previous lexicographic permutation under
// comp; false when the range wraps to the largest permutation.
template <class BidirectionalIterator, class StrictWeakOrdering>
bool prev_permutation(BidirectionalIterator first, BidirectionalIterator last,
			StrictWeakOrdering comp, const unsigned P)
{
	return std::prev_permutation(first, last, comp);
}

// Overload using operator< as the ordering.
template <class BidirectionalIterator>
bool prev_permutation(BidirectionalIterator first, BidirectionalIterator last,
			const unsigned P)
{
// 	std::less<typename
// 		std::iterator_traits<BidirectionalIterator>::value_type>(),
	return std::prev_permutation(first, last);
}
// Serial pass-through: shuffling depends on a single stream of random
// numbers, so it is not parallelized here.
template <class RandomAccessIterator>
void random_shuffle(RandomAccessIterator first, RandomAccessIterator last,
			const unsigned P)
{
	std::random_shuffle(first, last);
}

// Overload using the caller-supplied random number generator rgen.
template <class RandomAccessIterator, class RandomNumberGenerator>
void random_shuffle(RandomAccessIterator first, RandomAccessIterator last,
			RandomNumberGenerator& rgen, const unsigned P)
{
	std::random_shuffle(first, last, rgen);
}
// Not (yet) parallelized, not straightforward due to possible dependencies
// between subtasks.
// Shift elements not equal to value to the front; returns the new logical end.
template <class ForwardIterator, class T>
ForwardIterator remove( ForwardIterator first, ForwardIterator last,
			const T& value, const unsigned P)
{
	return std::remove(first, last, value);
}

// Not (yet) parallelized, not straightforward due to possible dependencies
// between subtasks.
// Shift elements not satisfying pred to the front; returns the new logical end.
template <class ForwardIterator, class Predicate>
ForwardIterator remove_if(ForwardIterator first, ForwardIterator last,
			Predicate pred, const unsigned P)
{
	return std::remove_if(first, last, pred);
}

// Not parallelized due to possible complications with OutputIterators.
// No par_remove_copy exists due to possible dependencies between subtasks.
// Copy all elements not equal to value into result; returns end of output.
template <class InputIterator, class OutputIterator, class T>
OutputIterator remove_copy(InputIterator first, InputIterator last,
			OutputIterator result, const T& value,
			const unsigned P)
{
	return std::remove_copy(first, last, result, value);
}
// Not parallelized due to possible complications with OutputIterators.
// No par_remove_copy_if exists due to possible dependencies between subtasks.
// Copy all elements of [first, last) for which pred is false into result;
// returns the end of the written output.
template <class InputIterator, class OutputIterator, class Predicate>
OutputIterator remove_copy_if(InputIterator first, InputIterator last,
			OutputIterator result, Predicate pred,
			const unsigned P)
{
	// BUG FIX: previously forwarded to std::remove_copy, which treats
	// pred as a *value* to compare elements against rather than as a
	// predicate; std::remove_copy_if is the intended algorithm.
	return std::remove_copy_if(first, last, result, pred);
}
// Parallel std::replace: substitute new_value for every element of
// [first, last) that equals old_value, using up to P threads.
template <class ForwardIterator, class T>
void replace(ForwardIterator first, ForwardIterator last, const T& old_value,
		const T& new_value, const unsigned P)
{
	// Short ranges are not worth the thread-management overhead.
	if (detail::_linear_serial_is_faster(first, last, P))
	{
		std::replace(first, last, old_value, new_value);
		return;
	}

	// One chunk per thread; the chunks are disjoint, so each thread can
	// rewrite its own chunk without synchronization.
	typedef std::pair<ForwardIterator, ForwardIterator> Chunk;
	std::vector<Chunk> chunks(P);
	::omptl::detail::_partition_range(first, last, chunks, P);

	#pragma omp parallel for
	for (int i = 0; i < int(P); ++i)
		std::replace(chunks[i].first, chunks[i].second, old_value, new_value);
}
namespace detail
{

// Parallel replace_copy_if: copy [first, last) to result, substituting
// new_value wherever pred holds. Parallel only when the source can be
// partitioned and the destination can be advanced independently.
template <class Iterator1Tag, class Iterator2Tag>
struct Replace_copy_if_
{
	template <class Iterator1, class Iterator2, class Predicate, class T>
	static Iterator2
	replace_copy_if(Iterator1 first, Iterator1 last,
			Iterator2 result, Predicate pred,
			const T& new_value, const unsigned P)
	{
		if (detail::_linear_serial_is_faster(first, last, P))
			return std::replace_copy_if(first, last, result, pred, new_value);

		std::vector< std::pair<Iterator1, Iterator1> > source_partitions(P);
		::omptl::detail::_partition_range(first, last, source_partitions, P);

		// Destination starting points aligned with the source
		// partitions (computed by _copy_partitions, defined earlier).
		std::vector<Iterator2> dest_partitions(P);
		::omptl::detail::_copy_partitions(source_partitions, result, dest_partitions, P);

		// Only the final thread's return value is the true end of
		// the output; the other threads write into a dummy.
		#pragma omp parallel for
		for (int t = 0; t < int(P); ++t)
		{
			Iterator2 tmp;
			*( (t == int(P-1)) ? &result : &tmp )
				= std::replace_copy_if(source_partitions[t].first,
							source_partitions[t].second,
						dest_partitions[t], pred, new_value);
		}

		return result;
	}
};

// Single-pass source cannot be partitioned; run serially.
template <class Iterator2Tag>
struct Replace_copy_if_< std::input_iterator_tag, Iterator2Tag>
{
	template <class Iterator1, class Iterator2,
		class Predicate, class T>
	static Iterator2
	replace_copy_if(Iterator1 first, Iterator1 last,
			Iterator2 result, Predicate pred,
			const T& new_value, const unsigned P)
	{
		return std::replace_copy_if(first, last, result, pred, new_value);
	}
};

// Pure output destination cannot be advanced ahead of writes; run serially.
template <class Iterator1Tag>
struct Replace_copy_if_< Iterator1Tag, std::output_iterator_tag>
{
	template <class Iterator1, class OutputIterator,
		class Predicate, class T>
	static OutputIterator
	replace_copy_if(Iterator1 first, Iterator1 last,
			OutputIterator result, Predicate pred,
			const T& new_value, const unsigned P)
	{
		return std::replace_copy_if(first, last, result, pred, new_value);
	}
};

// Both constraints at once: serial.
template <>
struct Replace_copy_if_< std::input_iterator_tag, std::output_iterator_tag>
{
	template <class InputIterator, class OutputIterator,
		class Predicate, class T>
	static OutputIterator
	replace_copy_if(InputIterator first, InputIterator last,
			OutputIterator result, Predicate pred,
			const T& new_value, const unsigned P)
	{
		return std::replace_copy_if(first, last, result, pred, new_value);
	}
};

} // end namespace detail
// Parallel std::replace_copy_if: copy [first, last) into result,
// substituting new_value wherever pred holds; returns end of output.
// Dispatches on both iterator categories.
template <class InputIterator, class OutputIterator, class Predicate, class T>
OutputIterator replace_copy_if(InputIterator first, InputIterator last,
				OutputIterator result, Predicate pred,
				const T& new_value, const unsigned P)
{
	return ::omptl::detail::Replace_copy_if_<
		typename std::iterator_traits< InputIterator>::iterator_category,
		typename std::iterator_traits<OutputIterator>::iterator_category>
			::replace_copy_if(first, last, result, pred, new_value, P);
}
// Parallel std::replace_copy: copy [first, last) into result, substituting
// new_value for elements equal to old_value. Implemented on top of
// replace_copy_if with an equality predicate bound to old_value.
template <class InputIterator, class OutputIterator, class T>
OutputIterator replace_copy(InputIterator first, InputIterator last,
			OutputIterator result, const T& old_value,
			const T& new_value, const unsigned P)
{
	return ::omptl::replace_copy_if(first, last, result,
		std::bind2nd(std::equal_to<T>(), old_value), new_value, P);
}
// Parallel std::replace_if: substitute new_value for every element of
// [first, last) satisfying pred, using up to P threads.
template <class ForwardIterator, class Predicate, class T>
void replace_if(ForwardIterator first, ForwardIterator last, Predicate pred,
		const T& new_value, const unsigned P)
{
	if (detail::_linear_serial_is_faster(first, last, P))
		return std::replace_if(first, last, pred, new_value);

	std::vector< std::pair<ForwardIterator, ForwardIterator> > partitions(P);
	::omptl::detail::_partition_range(first, last, partitions, P);

	// Disjoint chunks: no synchronization needed.
	#pragma omp parallel for
	for (int t = 0; t < int(P); ++t)
		std::replace_if(partitions[t].first, partitions[t].second, pred, new_value);
}
// TODO - not yet parallelized.
// Reverse the order of the elements in [first, last) in place.
template <class BidirectionalIterator>
void reverse(BidirectionalIterator first, BidirectionalIterator last, const unsigned P)
{
	std::reverse(first, last);
}

// TODO - not yet parallelized.
// Copy [first, last) into result in reverse order; returns end of output.
template <class BidirectionalIterator, class OutputIterator>
OutputIterator reverse_copy(BidirectionalIterator first,
			BidirectionalIterator last,
			OutputIterator result, const unsigned P)
{
	return std::reverse_copy(first, last, result);
}

// TODO - not yet parallelized.
// Rotate [first, last) in place so that middle becomes the first element.
template <class ForwardIterator>
ForwardIterator rotate( ForwardIterator first, ForwardIterator middle,
			ForwardIterator last, const unsigned P)
{
	return std::rotate(first, middle, last);
}
// TODO
// Not parallelized yet; serial fallback, P is ignored.
// Copies [first, last) rotated around middle into result: the output begins
// with [middle, last) followed by [first, middle). Returns one past the
// last element written.
template <class ForwardIterator, class OutputIterator>
OutputIterator rotate_copy(ForwardIterator first, ForwardIterator middle,
                           ForwardIterator last, OutputIterator result,
                           const unsigned P)
{
	// Bug fix: this previously called std::rotate(first, middle, last,
	// result) — std::rotate takes three iterators and rotates in place, so
	// the four-argument call did not compile when instantiated. The copying
	// variant is std::rotate_copy.
	return std::rotate_copy(first, middle, last, result);
}
/*
This can't be right - partitioning the range might cut valid subsequences
in [first1-last1]
template <class ForwardIterator1, class ForwardIterator2,
class BinaryPredicate>
ForwardIterator1 search(ForwardIterator1 first1, ForwardIterator1 last1,
ForwardIterator2 first2, ForwardIterator2 last2,
BinaryPredicate binary_pred, const unsigned P)
{
if (detail::_linear_serial_is_faster(first1, last1, P))
return std::search(first1, last1, first2, last2,
binary_pred);
std::vector< std::pair<ForwardIterator1, ForwardIterator1> >
partitions(P);
::omptl::detail::_partition_range(first1, last1, partitions, P);
std::vector<ForwardIterator1> results(P);
#pragma omp parallel for
for (int t = 0; t < int(P); ++t)
{
results[t] = std::search(partitions[t].first,
partitions[t].second,
first2, last2, binary_pred);
}
const typename std::vector<ForwardIterator1>::iterator
result = std::find_if(results.begin(), results.end(),
std::bind2nd(std::not_equal_to<ForwardIterator1>(),
last1));
if (result != results.end())
return *result;
return last1;
}
*/
// Serial search: find the first occurrence of [first2, last2) inside
// [first1, last1) under binary_pred. Not parallelized (see the commented-out
// attempt above: partitioning the haystack can cut a valid match in half).
// P is ignored.
template <class ForwardIterator1, class ForwardIterator2,
class BinaryPredicate>
ForwardIterator1 search(ForwardIterator1 first1, ForwardIterator1 last1,
ForwardIterator2 first2, ForwardIterator2 last2,
BinaryPredicate binary_pred, const unsigned P)
{
return std::search(first1, last1, first2, last2, binary_pred);
}
// search with operator== as the element comparison; serial, P ignored.
template <class ForwardIterator1, class ForwardIterator2>
ForwardIterator1 search(ForwardIterator1 first1, ForwardIterator1 last1,
ForwardIterator2 first2, ForwardIterator2 last2,
const unsigned P)
{
// typedef typename
// std::iterator_traits<ForwardIterator1>::value_type VT;
// return ::omptl::search(first1, last1, first2, last2,
// std::equal_to<VT>(), P);
return std::search(first1, last1, first2, last2);
}
// TODO
// Not parallelized yet (same match-splitting problem as search); serial
// fallback, P is ignored.
template <class ForwardIterator, class Integer,
class T, class BinaryPredicate>
ForwardIterator search_n(ForwardIterator first, ForwardIterator last,
Integer count, const T& value,
BinaryPredicate binary_pred, const unsigned P)
{
return std::search_n(first, last, count, value, binary_pred);
}
// search_n with operator== as the element comparison; serial, P ignored.
template <class ForwardIterator, class Integer, class T>
ForwardIterator search_n(ForwardIterator first, ForwardIterator last,
Integer count, const T& value, const unsigned P)
{
// std::equal_to<typename
// std::iterator_traits<ForwardIterator>::value_type>
return std::search_n(first, last, count, value);
}
// Set difference of two sorted ranges (elements of range 1 not in range 2).
// Not parallelized; serial fallback, P is ignored.
template <class InputIterator1, class InputIterator2, class OutputIterator,
class StrictWeakOrdering>
OutputIterator set_difference(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, InputIterator2 last2,
OutputIterator result, StrictWeakOrdering comp,
const unsigned P)
{
return std::set_difference(first1, last1, first2, last2, result, comp);
}
// set_difference ordered by operator<; serial, P ignored.
template <class InputIterator1, class InputIterator2, class OutputIterator>
OutputIterator set_difference(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, InputIterator2 last2,
OutputIterator result, const unsigned P)
{
return std::set_difference(first1, last1, first2, last2, result);
}
// Set intersection of two sorted ranges; serial, P ignored.
template <class InputIterator1, class InputIterator2, class OutputIterator,
class StrictWeakOrdering>
OutputIterator set_intersection(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, InputIterator2 last2,
OutputIterator result, StrictWeakOrdering comp,
const unsigned P)
{
return std::set_intersection( first1, last1, first2, last2, result, comp);
}
// set_intersection ordered by operator<; serial, P ignored.
template <class InputIterator1, class InputIterator2, class OutputIterator>
OutputIterator set_intersection(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, InputIterator2 last2,
OutputIterator result, const unsigned P)
{
return std::set_intersection( first1, last1, first2, last2, result);
}
// Set symmetric difference of two sorted ranges (elements in exactly one of
// the two ranges). Not parallelized; serial fallback, P is ignored.
template <class InputIterator1, class InputIterator2, class OutputIterator, class StrictWeakOrdering>
OutputIterator
set_symmetric_difference(InputIterator1 first1, InputIterator1 last1,
                         InputIterator2 first2, InputIterator2 last2,
                         OutputIterator result, StrictWeakOrdering comp,
                         const unsigned P)
{
	return std::set_symmetric_difference( first1, last1, first2, last2, result, comp);
}
// set_symmetric_difference ordered by operator<; serial, P ignored.
// Bug fix: this overload previously declared an extra, unused
// StrictWeakOrdering template parameter. Since it appears nowhere in the
// signature it could never be deduced, so every call to the comparator-less
// overload failed template argument deduction. The parameter list now
// matches the comparator-less set_union/set_difference overloads.
template <class InputIterator1, class InputIterator2, class OutputIterator>
OutputIterator
set_symmetric_difference(InputIterator1 first1, InputIterator1 last1,
                         InputIterator2 first2, InputIterator2 last2,
                         OutputIterator result, const unsigned P)
{
	return std::set_symmetric_difference( first1, last1, first2, last2, result);
}
// Set union of two sorted ranges; serial fallback, P is ignored.
template <class InputIterator1, class InputIterator2, class OutputIterator, class StrictWeakOrdering>
OutputIterator set_union(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, InputIterator2 last2,
OutputIterator result, StrictWeakOrdering comp,
const unsigned P)
{
return std::set_union(first1, last1, first2, last2, result, comp);
}
// set_union ordered by operator<; serial, P ignored.
template <class InputIterator1, class InputIterator2, class OutputIterator>
OutputIterator set_union(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, InputIterator2 last2,
OutputIterator result, const unsigned P)
{
return std::set_union(first1, last1, first2, last2, result);
}
// Parallel comparison sort.
// Strategy: sample pivots from the range, then run floor(log2(P)) parallel
// rounds in which every current partition is split in two around one of the
// pivots, so that afterwards each block of the range holds only elements
// that belong there in the final order. Each block is then std::sort-ed in
// parallel. Non-power-of-two P needs one extra split round for the blocks
// that are still double-sized.
template<typename RandomAccessIterator, class StrictWeakOrdering>
void sort(RandomAccessIterator first, RandomAccessIterator last,
StrictWeakOrdering comp, const unsigned P)
{
// Small inputs: the serial sort wins.
if ( ::omptl::detail::_nlogn_serial_is_faster(first, last, P) )
{
std::sort(first, last, comp);
return;
}
assert(std::distance(first, last) >= 3u*P);
// Generate pivots
typedef typename std::iterator_traits<RandomAccessIterator>::value_type value_type;
std::vector<value_type> pivots;
::omptl::detail::_find_pivots(first, last, pivots, comp, P);
// Sort sufficiently to respect pivot order
typedef std::pair<RandomAccessIterator, RandomAccessIterator> Partition;
std::vector< Partition > borders(1, std::make_pair(first, last));
std::vector<char> pivot_used(pivots.size(), false); // can't be bool due to parallel write
// NOTE(review): std::tr1::log2 is pre-C++11 TR1; C++11 and later provide
// std::log2 in <cmath> — confirm the targeted standard before porting.
const unsigned max_depth = std::floor(std::tr1::log2(P));
assert(1u << max_depth <= P);
// Each round doubles the number of partitions: 1 -> 2 -> ... -> 2^max_depth.
for (unsigned i = 0; i < max_depth; ++i)
{
const int Npartitions = borders.size();
assert(borders.size() == 1u << i);
assert(borders.size() <= P);
//std::cerr << "depth: " << i << " size: " << Npartitions << " new size: " << (2*Npartitions) << std::endl;
std::vector< Partition > new_borders(2u*Npartitions);
#pragma omp parallel for
for (int p = 0; p < Npartitions; ++p)
{
if (2*p+1 >= int(P))
continue;
// Middle pivot of the pivot sub-range that belongs to partition p.
const unsigned pivot_index = (2*p+1) * pivots.size() / (1u<<i) / 2u;
assert(pivot_index < pivots.size());
//std::cerr << "\tp: " << p << " P: " << P << " Npartitions: " << Npartitions << " pivot_index: " << pivot_index << std::endl;
assert(!pivot_used[pivot_index]);
pivot_used[pivot_index] = true;
// Rearrange the partition around the pivot; middle is the split
// point (presumably as in std::partition — see detail::_pivot_range).
const RandomAccessIterator middle =
detail::_pivot_range(borders[p].first,
borders[p].second,
pivots[pivot_index], comp);
new_borders[2*p ] = std::make_pair(borders[p].first, middle);
new_borders[2*p + 1] = std::make_pair(middle, borders[p].second);
}
std::swap(borders, new_borders);
}
// The partitions must tile [first, last) without gaps or overlap.
assert(borders.size() <= P);
assert(borders[0].first == first);
for (unsigned i = 0; i < borders.size()-1; ++i)
assert(borders[i].second == borders[i+1].first);
assert(borders.back().second == last);
// Powers of two are easy: sort and leave
if (borders.size() == P)
{
#pragma omp parallel for
for (int t = 0; t < int(P); ++t)
std::sort(borders[t].first, borders[t].second, comp);
return;
}
// For non-powers of two, split remaining partitions and sort those
// that are already their final size.
std::vector< Partition > partitions;
std::vector<bool> final;
std::vector<bool> dummy;
for (unsigned i = 0; i < borders.size(); ++i)
{
partitions.push_back(borders[i]);
dummy.push_back(false);
const unsigned pivot_index = (2*i+1) * pivots.size() / borders.size() / 2;
assert(pivot_index < pivots.size());
if (pivot_used[pivot_index])
final.push_back(true);
else
{
// meta-data first part
final.push_back(false);
// dummy to be overwritten by splitting
partitions.push_back( std::make_pair(last, last) ); // dummy
dummy.push_back(true);
final.push_back(false);
}
}
assert(partitions.size() == P);
assert(final.size() == P);
assert(dummy.size() == P);
/*
for (unsigned i = 0; i < pivot_used.size(); ++i)
std::cout << bool(pivot_used[i]) << " ";
std::cout << std::endl;
std::cout << borders.size() << " " << partitions.size() << " " << P << std::endl;
*/
// Round one: sort final partitions, split remaining
#pragma omp parallel for
for (int i = 0; i < int(partitions.size()); ++i)
{
//std::cout << i;
if (final[i])
{
assert(!dummy[i]);
std::sort(partitions[i].first, partitions[i].second, comp);
//std::cout << " sort"<< std::endl;
}
else if (dummy[i]) // will be handled by first part
{
assert(i > 0);
assert(!dummy[i-1]);
//std::cout << " skip"<< std::endl;
continue;
}
else
{
//std::cout << " split"<< std::endl;
assert(dummy[i+1]);
assert(!final[i+1]);
const unsigned pivot_index = i * (P-1) / (partitions.size()-1);
//std::cerr << "\tp: " << i << " P: " << P << " Npartitions: " << partitions.size() << " pivot_index: " << pivot_index << std::endl;
assert(pivot_index < pivots.size());
assert(!pivot_used[pivot_index]);
pivot_used[pivot_index] = true;
const RandomAccessIterator begin = partitions[i].first;
const RandomAccessIterator end = partitions[i].second;
const RandomAccessIterator middle =
detail::_pivot_range(begin, end, pivots[pivot_index], comp);
partitions[i ] = std::make_pair(begin, middle);
partitions[i+1] = std::make_pair(middle, end);
}
}
// Every pivot must have been consumed; the P partitions must again tile
// [first, last).
for (unsigned i = 0; i < pivot_used.size(); ++i)
assert(pivot_used[i]);
assert(partitions.size() == P);
assert(std::find(pivot_used.begin(), pivot_used.end(), false) == pivot_used.end());
assert(partitions[0].first == first);
for (unsigned i = 0; i < P-1; ++i)
assert(partitions[i].second == partitions[i+1].first);
assert(partitions[P-1].second == last);
// Sort last unsorted partitions
#pragma omp parallel for
for (int i = 0; i < int(partitions.size()); ++i)
if (!final[i])
std::sort(partitions[i].first, partitions[i].second, comp);
}
// Parallel sort with the default ordering (operator< of the value type).
template<typename RandomAccessIterator>
void sort(RandomAccessIterator first, RandomAccessIterator last, const unsigned P)
{
	typedef typename std::iterator_traits<RandomAccessIterator>::value_type ValueType;
	const std::less<ValueType> default_order = std::less<ValueType>();
	::omptl::sort(first, last, default_order, P);
}
/*
template<typename RandomAccessIterator, class StrictWeakOrdering>
void _par_stable_sort(RandomAccessIterator first, RandomAccessIterator last,
StrictWeakOrdering comp, const unsigned P)
{
if ( ::omptl::detail::_nlogn_serial_is_faster(first, last, P) )
{
std::stable_sort(first, last, comp);
return;
}
// Generate pivots
std::vector<typename
std::iterator_traits<RandomAccessIterator>::value_type>
pivots;
_find_pivots(first, last, pivots, P);
// Sort sufficiently to respect pivot order
std::vector< std::pair<RandomAccessIterator, RandomAccessIterator> >
partitions(P);
::omptl::detail::_partition_range_stable_by_pivots(first, last, pivots,
partitions, comp, P);
// Sort
#pragma omp parallel for // default(none) shared(partitions)
for (int t = 0; t < int(P); ++t)
std::stable_sort(partitions[t].first,
partitions[t].second, comp);
}
template<typename RandomAccessIterator, class StrictWeakOrdering>
void _stable_sort(RandomAccessIterator first, RandomAccessIterator last,
StrictWeakOrdering comp, const unsigned P)
{
std::stable_sort(first, last, comp);
}
template<typename RandomAccessIterator>
void _stable_sort(RandomAccessIterator first, RandomAccessIterator last,
std::less<typename
std::iterator_traits<RandomAccessIterator>::value_type>
comp, const unsigned P)
{
::omptl::detail::_par_stable_sort(first, last, comp, P);
}
// template<typename RandomAccessIterator>
// void _stable_sort(RandomAccessIterator first, RandomAccessIterator last,
// std::greater<
// typename std::iterator_traits<RandomAccessIterator>::value_type> comp,
// const unsigned P)
// {
// ::omptl::detail::_par_stable_sort(first, last, comp, P);
// }
*/
// Stable sort: not parallelized (see the commented-out attempt above);
// serial fallback, P is ignored. Equal elements keep their relative order.
template<typename RandomAccessIterator, class StrictWeakOrdering>
void stable_sort(RandomAccessIterator first, RandomAccessIterator last,
StrictWeakOrdering comp, const unsigned P)
{
std::stable_sort(first, last, comp);
}
// stable_sort ordered by operator<; serial, P ignored.
template<typename RandomAccessIterator>
void stable_sort(RandomAccessIterator first, RandomAccessIterator last, const unsigned P)
{
typedef typename std::iterator_traits<RandomAccessIterator>::value_type VT;
::omptl::stable_sort(first, last, std::less<VT>(), P);
}
// Parallel swap_ranges: exchanges [first1, last1) with the equally long
// range starting at first2. Returns one past the last element swapped in
// the second range.
template <class ForwardIterator1, class ForwardIterator2>
ForwardIterator2 swap_ranges(ForwardIterator1 first1, ForwardIterator1 last1,
ForwardIterator2 first2, const unsigned P)
{
if (detail::_linear_serial_is_faster(first1, last1, P))
return std::swap_ranges(first1, last1, first2);
// Partition the first range, then derive the matching start iterators in
// the second range.
std::vector< std::pair<ForwardIterator1, ForwardIterator1> > source_partitions(P);
::omptl::detail::_partition_range(first1, last1, source_partitions, P);
std::vector<ForwardIterator2> dest_partitions(P);
::omptl::detail::_copy_partitions(source_partitions, first2, dest_partitions, P);
ForwardIterator2 result;
#pragma omp parallel for
for (int t = 0; t < int(P); ++t)
{
// Only the last partition's return value is the overall result; the
// other threads write into a throw-away local to avoid a data race on
// `result`.
ForwardIterator2 tmp;
*( (t == int(P-1)) ? &result : &tmp )
= std::swap_ranges(source_partitions[t].first,
source_partitions[t].second,
dest_partitions[t]);
}
return result;
}
namespace detail
{
// Primary template: both iterator categories allow multiple passes, so the
// source range is partitioned and each thread transforms its own chunk.
template <class IteratorInTag, class IteratorOutTag>
struct Transform_
{
template <class IteratorIn, class IteratorOut, class UnaryFunction>
static IteratorOut transform(IteratorIn first, IteratorIn last,
IteratorOut result, UnaryFunction op, const unsigned P)
{
if (detail::_linear_serial_is_faster(first, last, P))
return std::transform(first, last, result, op);
std::vector< std::pair<IteratorIn, IteratorIn> > source_partitions(P);
detail::_partition_range(first, last, source_partitions, P);
std::vector<IteratorOut> dest_partitions(P);
detail::_copy_partitions(source_partitions, result, dest_partitions, P);
#pragma omp parallel for
for (int t = 0; t < int(P); ++t)
{
// Only the last partition yields the overall return value; other
// threads write to a throw-away local to avoid racing on `result`.
IteratorOut tmp;
*( (t == int(P-1)) ? &result : &tmp )
= std::transform(source_partitions[t].first,
source_partitions[t].second,
dest_partitions[t], op);
}
return result;
}
};
// Partial specialization for a pure output-iterator destination: the
// destination cannot be advanced independently per thread, so delegate to
// the serial algorithm (P is ignored).
template <class IteratorInTag>
struct Transform_<IteratorInTag, std::output_iterator_tag>
{
template <class InputIterator, class OutputIterator,
class UnaryFunction>
static OutputIterator transform(InputIterator first, InputIterator last,
OutputIterator result, UnaryFunction op,
const unsigned P)
{
return std::transform(first, last, result, op);
}
};
// Partial specialization for a single-pass input-iterator source: the work
// cannot be partitioned, so delegate to the serial algorithm (P is ignored).
// Bug fix: `transform` was a non-static member here (and in the full
// specialization below), but the dispatcher invokes it as
// Transform_<...>::transform(...) without an object, which only compiles
// for a static member. Every other specialization already declares it
// static; these two now do as well.
template <class IteratorOutTag>
struct Transform_< std::input_iterator_tag, IteratorOutTag >
{
	template <class InputIterator, class OutputIterator, class UnaryFunction>
	static OutputIterator transform(InputIterator first, InputIterator last,
				OutputIterator result, UnaryFunction op,
				const unsigned P)
	{
		return std::transform(first, last, result, op);
	}
};
// Full specialization: input source and pure output destination; serial
// only (P is ignored). `static` added — see the note above.
template <>
struct Transform_< std::input_iterator_tag, std::output_iterator_tag >
{
	template <class InputIterator, class OutputIterator, class UnaryFunction>
	static OutputIterator transform(InputIterator first, InputIterator last,
				OutputIterator result, UnaryFunction op,
				const unsigned P)
	{
		return std::transform(first, last, result, op);
	}
};
} // end namespace detail
// Parallel unary transform entry point: applies op to every element of
// [first, last) and writes the results starting at `result`. Dispatches on
// both iterator categories; non-partitionable combinations fall back to the
// serial std::transform.
template <class InputIterator, class OutputIterator, class UnaryFunction>
OutputIterator transform(InputIterator first, InputIterator last,
OutputIterator result, UnaryFunction op,
const unsigned P)
{
return ::omptl::detail::Transform_<
typename std::iterator_traits< InputIterator>::iterator_category,
typename std::iterator_traits<OutputIterator>::iterator_category>::
transform(first, last, result, op, P);
}
namespace detail
{
// Primary template for the binary transform: all three iterator categories
// allow multiple passes, so the first range is partitioned and the matching
// start iterators of the second range and destination are derived from it.
template <class Iterator1Tag, class Iterator2Tag, class IteratorOutTag>
struct Transform2_
{
template <class Iterator1, class Iterator2, class IteratorOut, class BinaryFunction>
static IteratorOut transform(Iterator1 first1, Iterator1 last1,
Iterator2 first2, IteratorOut result,
BinaryFunction binary_op, const unsigned P)
{
if (detail::_linear_serial_is_faster(first1, last1, P))
return std::transform(first1, last1, first2, result, binary_op);
std::vector< std::pair<Iterator1, Iterator1> > source_partitions1(P);
::omptl::detail::_partition_range(first1, last1, source_partitions1, P);
std::vector<Iterator2> source_partitions2(P);
::omptl::detail::_copy_partitions(source_partitions1, first2, source_partitions2 , P);
std::vector<IteratorOut> dest_partitions(P);
::omptl::detail::_copy_partitions(source_partitions1, result, dest_partitions, P);
#pragma omp parallel for
for (int t = 0; t < int(P); ++t)
{
// Only the last partition yields the overall return value; other
// threads write to a throw-away local to avoid racing on `result`.
IteratorOut tmp;
*( (t == int(P-1)) ? &result : &tmp ) =
std::transform( source_partitions1[t].first,
source_partitions1[t].second,
source_partitions2[t],
dest_partitions [t], binary_op);
}
return result;
}
};
// The specializations below cover every combination in which at least one
// range is a single-pass input iterator or the destination is a pure output
// iterator: none of those can be partitioned, so each delegates to the
// serial std::transform (P is ignored). One specialization exists per
// combination to keep overload/partial-ordering unambiguous.
template <class Iterator2Tag, class IteratorOutTag>
struct Transform2_< std::input_iterator_tag, Iterator2Tag, IteratorOutTag >
{
template <class InputIterator1, class InputIterator2,
class OutputIterator, class BinaryFunction>
static OutputIterator
transform(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator result,
BinaryFunction binary_op, const unsigned P)
{
return std::transform(first1, last1, first2, result, binary_op);
}
};
template <class Iterator1Tag, class IteratorOutTag>
struct Transform2_< Iterator1Tag, std::input_iterator_tag, IteratorOutTag >
{
template <class InputIterator1, class InputIterator2,
class OutputIterator, class BinaryFunction>
static OutputIterator
transform(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator result,
BinaryFunction binary_op, const unsigned P)
{
return std::transform(first1, last1, first2, result, binary_op);
}
};
template <class Iterator1Tag, class Iterator2Tag>
struct Transform2_< Iterator1Tag, Iterator2Tag, std::output_iterator_tag>
{
template <class InputIterator1, class InputIterator2,
class OutputIterator, class BinaryFunction>
static OutputIterator
transform(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator result,
BinaryFunction binary_op, const unsigned P)
{
return std::transform(first1, last1, first2, result, binary_op);
}
};
template <class IteratorOutTag>
struct Transform2_< std::input_iterator_tag,
std::input_iterator_tag, IteratorOutTag >
{
template <class InputIterator1, class InputIterator2,
class OutputIterator, class BinaryFunction>
static OutputIterator
transform(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator result,
BinaryFunction binary_op, const unsigned P)
{
return std::transform(first1, last1, first2, result, binary_op);
}
};
template <class Iterator1Tag>
struct Transform2_< Iterator1Tag, std:: input_iterator_tag,
std::output_iterator_tag >
{
template <class InputIterator1, class InputIterator2,
class OutputIterator, class BinaryFunction>
static OutputIterator
transform(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator result,
BinaryFunction binary_op, const unsigned P)
{
return std::transform(first1, last1, first2, result, binary_op);
}
};
template <class Iterator2Tag>
struct Transform2_< std:: input_iterator_tag, Iterator2Tag,
std::output_iterator_tag >
{
template <class InputIterator1, class InputIterator2,
class OutputIterator, class BinaryFunction>
static OutputIterator
transform(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator result,
BinaryFunction binary_op, const unsigned P)
{
return std::transform(first1, last1, first2, result, binary_op);
}
};
template <>
struct Transform2_< std:: input_iterator_tag, std:: input_iterator_tag,
std::output_iterator_tag >
{
template <class InputIterator1, class InputIterator2,
class OutputIterator, class BinaryFunction>
static OutputIterator
transform(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator result,
BinaryFunction binary_op, const unsigned P)
{
return std::transform(first1, last1, first2, result, binary_op);
}
};
} // end namespace detail
// Parallel binary transform entry point: result[i] = binary_op(range1[i],
// range2[i]). Dispatches on all three iterator categories; combinations
// that cannot be partitioned fall back to the serial std::transform.
template <class InputIterator1, class InputIterator2, class OutputIterator, class BinaryFunction>
OutputIterator transform(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator result,
BinaryFunction binary_op, const unsigned P)
{
return ::omptl::detail::Transform2_<
typename std::iterator_traits<InputIterator1>::iterator_category,
typename std::iterator_traits<InputIterator2>::iterator_category,
typename std::iterator_traits<OutputIterator>::iterator_category>::
transform(first1, last1, first2, result, binary_op, P);
}
// unique: collapse consecutive duplicates in place. Not parallelized
// (removal shifts elements across partition boundaries); serial fallback,
// P is ignored.
template <class ForwardIterator, class BinaryPredicate>
ForwardIterator unique(ForwardIterator first, ForwardIterator last,
BinaryPredicate binary_pred, const unsigned P)
{
return std::unique(first, last, binary_pred);
}
// unique with operator== as the element comparison; serial, P ignored.
template <class ForwardIterator>
ForwardIterator unique(ForwardIterator first, ForwardIterator last, const unsigned P)
{
// std::equal_to<typename
// std::iterator_traits<ForwardIterator>::value_type>(),
return std::unique(first, last);
}
// unique_copy: copy while collapsing consecutive duplicates; serial,
// P is ignored.
template <class InputIterator, class OutputIterator, class BinaryPredicate>
OutputIterator unique_copy(InputIterator first, InputIterator last,
OutputIterator result, BinaryPredicate binary_pred,
const unsigned P)
{
return std::unique_copy(first, last, result, binary_pred);
}
// unique_copy with operator== as the element comparison; serial, P ignored.
template <class InputIterator, class OutputIterator>
OutputIterator unique_copy(InputIterator first, InputIterator last,
OutputIterator result, const unsigned P)
{
// std::equal_to<typename
// std::iterator_traits<InputIterator>::value_type>(),
return std::unique_copy(first, last, result);
}
// Parallel upper_bound on a comp-sorted range: returns the first iterator
// whose element compares greater than value (or last if none does). Each
// thread runs a binary search on its own partition; the partial results
// are then combined.
template <class ForwardIterator, class T, class StrictWeakOrdering>
ForwardIterator upper_bound(ForwardIterator first, ForwardIterator last,
			const T& value, StrictWeakOrdering comp, const unsigned P)
{
	if (detail::_logn_serial_is_faster(first, last, P))
		return std::upper_bound(first, last, value, comp);
	std::vector< std::pair<ForwardIterator, ForwardIterator> > partitions(P);
	::omptl::detail::_partition_range(first, last, partitions, P);
	std::vector<ForwardIterator> results(P);
	#pragma omp parallel for
	for (int t = 0; t < int(P); ++t)
		results[t] = std::upper_bound(partitions[t].first,
					partitions[t].second, value, comp);
	// The global upper bound is the FIRST per-partition result that is not
	// that partition's own end. Bug fix: the previous code scanned
	// backwards and returned the highest such result, which is wrong
	// whenever a later partition consists entirely of elements greater
	// than value (its result is its own begin and shadowed the true
	// bound); it also returned partitions[0].second instead of `last`
	// when every element compares <= value.
	for (unsigned i = 0; i < P; ++i)
		if (results[i] != partitions[i].second)
			return results[i];
	return last;
}
// upper_bound with the default ordering (operator< of the value type).
template <class ForwardIterator, class T>
ForwardIterator upper_bound(ForwardIterator first, ForwardIterator last, const T& value, const unsigned P)
{
	typedef typename std::iterator_traits<ForwardIterator>::value_type ValueType;
	const std::less<ValueType> default_order = std::less<ValueType>();
	return ::omptl::upper_bound(first, last, value, default_order, P);
}
} /* namespace omptl */
|
resource_manager.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & University of Surrey for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_RESOURCE_MANAGER_H_
#define CORE_RESOURCE_MANAGER_H_
#include <omp.h>
#include <sched.h>
#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <ostream>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "core/agent/agent.h"
#include "core/agent/agent_handle.h"
#include "core/agent/agent_uid.h"
#include "core/agent/agent_uid_generator.h"
#include "core/container/agent_uid_map.h"
#include "core/diffusion/diffusion_grid.h"
#include "core/functor.h"
#include "core/operation/operation.h"
#include "core/simulation.h"
#include "core/type_index.h"
#include "core/util/numa.h"
#include "core/util/root.h"
#include "core/util/thread_info.h"
#include "core/util/type.h"
namespace bdm {
/// ResourceManager stores agents and diffusion grids and provides
/// methods to add, remove, and access them. Agents are uniquely identified
/// by their AgentUid, and AgentHandle. An AgentHandle might change during the
/// simulation.
class ResourceManager {
public:
/// Constructor used by ROOT I/O when restoring from file; leaves members
/// default-initialized (the argument is a ROOT dispatch tag only).
explicit ResourceManager(TRootIOCtor* r) {}
// Default constructor and destructor are defined out of line (.cc file).
ResourceManager();
virtual ~ResourceManager();
/// Move assignment, used when restoring a ResourceManager (e.g. from a
/// backup): releases everything this manager currently owns, takes over the
/// other manager's agents and diffusion grids, and rebuilds the derived
/// data structures (uid -> handle map, type index).
ResourceManager& operator=(ResourceManager&& other) noexcept {
// The number of NUMA containers is fixed by the machine topology; a state
// saved with a different topology cannot be restored here.
if (agents_.size() != other.agents_.size()) {
Log::Fatal(
"Restored ResourceManager has different number of NUMA nodes.");
}
// Free the diffusion grids and agents currently owned by this manager.
for (auto& el : diffusion_grids_) {
delete el.second;
}
for (auto& numa_agents : agents_) {
for (auto* agent : numa_agents) {
delete agent;
}
}
// Take over the other manager's containers.
agents_ = std::move(other.agents_);
agents_lb_.resize(agents_.size());
diffusion_grids_ = std::move(other.diffusion_grids_);
RebuildAgentUidMap();
// restore type_index_
// NOTE(review): entries for the agents deleted above are presumably
// discarded elsewhere before Add() repopulates the index — verify.
if (type_index_) {
for (auto& numa_agents : agents_) {
for (auto* agent : numa_agents) {
type_index_->Add(agent);
}
}
}
return *this;
}
/// Recreate the uid -> AgentHandle map from the current agents_ layout,
/// e.g. after the agent containers were moved or reordered.
void RebuildAgentUidMap() {
  // rebuild uid_ah_map_
  uid_ah_map_.clear();
  auto* generator = Simulation::GetActive()->GetAgentUidGenerator();
  uid_ah_map_.resize(generator->GetHighestIndex() + 1);
  for (unsigned node = 0; node < agents_.size(); ++node) {
    const auto& numa_agents = agents_[node];
    for (unsigned idx = 0; idx < numa_agents.size(); ++idx) {
      uid_ah_map_.Insert(numa_agents[idx]->GetUid(), AgentHandle(node, idx));
    }
  }
}
/// Look up an agent by its unique id.
/// Returns nullptr if no agent with this uid is stored in this manager.
Agent* GetAgent(const AgentUid& uid) {
  if (uid_ah_map_.Contains(uid)) {
    const auto& handle = uid_ah_map_[uid];
    return agents_[handle.GetNumaNode()][handle.GetElementIdx()];
  }
  return nullptr;
}
/// Look up an agent by its (numa node, element index) handle.
Agent* GetAgent(AgentHandle ah) {
  const auto node = ah.GetNumaNode();
  const auto idx = ah.GetElementIdx();
  return agents_[node][idx];
}
/// Returns the handle for the given uid.
/// NOTE(review): unlike GetAgent(uid), this performs no Contains() check;
/// presumably callers must pass a uid known to be present — verify.
AgentHandle GetAgentHandle(const AgentUid& uid) { return uid_ah_map_[uid]; }
// Exchanges the internal agent storage with `agents` (defined in the .cc).
void SwapAgents(std::vector<std::vector<Agent*>>* agents);
/// Register a diffusion grid. Takes ownership of `dgrid`; substance ids
/// must be unique, a duplicate id aborts the simulation.
void AddDiffusionGrid(DiffusionGrid* dgrid) {
  const uint64_t substance_id = dgrid->GetSubstanceId();
  const bool already_present =
      diffusion_grids_.find(substance_id) != diffusion_grids_.end();
  if (already_present) {
    Log::Fatal("ResourceManager::AddDiffusionGrid",
               "You tried to add a diffusion grid with an already existing "
               "substance id. Please choose a different substance id.");
  } else {
    diffusion_grids_[substance_id] = dgrid;
  }
}
/// Remove and destroy the diffusion grid with the given substance id.
/// Logs an error (without aborting) if no such grid is registered.
void RemoveDiffusionGrid(size_t substance_id) {
  auto it = diffusion_grids_.find(substance_id);
  if (it == diffusion_grids_.end()) {
    Log::Error("ResourceManager::RemoveDiffusionGrid",
               "You tried to remove a diffusion grid that does not exist.");
    return;
  }
  delete it->second;
  diffusion_grids_.erase(it);
}
/// Return the diffusion grid which holds the substance of specified id,
/// or nullptr (plus a logged error) if no such grid is registered.
DiffusionGrid* GetDiffusionGrid(size_t substance_id) const {
  // Bug fix: diffusion_grids_ is a map keyed by substance id, and ids need
  // not be contiguous in [0, size()). The previous `substance_id >= size()`
  // comparison was not a membership test, and the subsequent .at() threw
  // for absent keys instead of returning nullptr. Use find() as the other
  // diffusion-grid accessors do.
  auto search = diffusion_grids_.find(substance_id);
  if (search == diffusion_grids_.end()) {
    Log::Error("DiffusionGrid::GetDiffusionGrid",
               "You tried to request diffusion grid '", substance_id,
               "', but it does not exist! Make sure that it's the correct id "
               "correctly and that the diffusion grid is registered.");
    return nullptr;
  }
  return search->second;
}
/// Return the diffusion grid which holds the substance of specified name
/// Caution: using this function in a tight loop will result in a slow
/// simulation. Use `GetDiffusionGrid(size_t)` in those cases.
/// Returns nullptr (plus a logged error) if no grid with this name exists.
DiffusionGrid* GetDiffusionGrid(const std::string& substance_name) const {
// Linear scan: the grids are keyed by id, not by name.
for (auto& el : diffusion_grids_) {
auto& dg = el.second;
if (dg->GetSubstanceName() == substance_name) {
return dg;
}
}
Log::Error("DiffusionGrid::GetDiffusionGrid",
"You tried to request a diffusion grid named '", substance_name,
"', but it does not exist! Make sure that it's spelled "
"correctly and that the diffusion grid is registered.");
return nullptr;
}
/// Execute the given functor for all diffusion grids
///     rm->ForEachDiffusionGrid([](DiffusionGrid* dgrid) {
///       ...
///     });
/// The functor receives the owned DiffusionGrid pointer; iteration order is
/// that of the underlying map (unspecified).
template <typename TFunctor>
void ForEachDiffusionGrid(TFunctor&& f) const {
for (auto& el : diffusion_grids_) {
f(el.second);
}
}
/// Returns the total number of agents if numa_node == -1
/// Otherwise the number of agents in the specific numa node
size_t GetNumAgents(int numa_node = -1) const {
  if (numa_node != -1) {
    return agents_[numa_node].size();
  }
  // Sum over all NUMA-node containers.
  size_t total = 0;
  for (const auto& numa_agents : agents_) {
    total += numa_agents.size();
  }
  return total;
}
// Current capacity of the agent container on `numa_node` (defined in .cc).
size_t GetAgentVectorCapacity(int numa_node);
/// Call a function for all or a subset of agents in the simulation.
/// @param function that will be called for each agent
/// @param filter if specified, `function` will only be called for agents
///               for which `filter(agent)` evaluates to true.
///
///     rm->ForEachAgent([](Agent* a) {
///       std::cout << a->GetUid() << std::endl;
///     });
virtual void ForEachAgent(const std::function<void(Agent*)>& function,
                          Functor<bool, Agent*>* filter = nullptr) {
  for (auto& numa_agents : agents_) {
    for (auto* agent : numa_agents) {
      // Simplified from `!filter || (filter && (*filter)(agent))`: the
      // inner `filter &&` was redundant — short-circuiting of `||`
      // already guarantees filter is non-null on the right-hand side.
      if (!filter || (*filter)(agent)) {
        function(agent);
      }
    }
  }
}
/// Same as ForEachAgent above, but the callback additionally receives the
/// agent's current AgentHandle (numa node, element index).
virtual void ForEachAgent(
    const std::function<void(Agent*, AgentHandle)>& function,
    Functor<bool, Agent*>* filter = nullptr) {
  for (uint64_t n = 0; n < agents_.size(); ++n) {
    auto& numa_agents = agents_[n];
    for (uint64_t i = 0; i < numa_agents.size(); ++i) {
      auto* a = numa_agents[i];
      // Simplified from `!filter || (filter && (*filter)(a))`: the inner
      // `filter &&` was redundant given short-circuit evaluation.
      if (!filter || (*filter)(a)) {
        function(a, AgentHandle(n, i));
      }
    }
  }
}
/// Call a function for all or a subset of agents in the simulation.
/// @param function that will be called for each agent
/// @param filter if specified, `function` will only be called for agents
///        for which `filter(agent)` evaluates to true.
/// Function invocations are parallelized.\n
/// Uses static scheduling.
/// \see ForEachAgent
virtual void ForEachAgentParallel(Functor<void, Agent*>& function,
Functor<bool, Agent*>* filter = nullptr);
/// Call an operation for all or a subset of agents in the simulation.
/// Function invocations are parallelized.\n
/// Uses static scheduling.
/// \see ForEachAgent
virtual void ForEachAgentParallel(Operation& op,
Functor<bool, Agent*>* filter = nullptr);
/// Parallel variant whose callback also receives the agent's AgentHandle.
/// Uses static scheduling. \see ForEachAgent
virtual void ForEachAgentParallel(
Functor<void, Agent*, AgentHandle>& function,
Functor<bool, Agent*>* filter = nullptr);
/// Call a function for all or a subset of agents in the simulation.
/// Function invocations are parallelized.\n
/// Uses dynamic scheduling and work stealing. Batch size controlled by
/// `chunk`.
/// \param chunk number of agents that are assigned to a thread (batch
///        size)
/// \see ForEachAgent
virtual void ForEachAgentParallel(
uint64_t chunk, Functor<void, Agent*, AgentHandle>& function,
Functor<bool, Agent*>* filter = nullptr);
/// Reserves enough memory to hold `capacity` number of agents for
/// each numa domain.
/// Also pre-sizes the type index (if one is configured) so subsequent
/// AddAgent calls avoid reallocation.
void Reserve(size_t capacity) {
for (auto& numa_agents : agents_) {
numa_agents.reserve(capacity);
}
if (type_index_) {
type_index_->Reserve(capacity);
}
}
/// Resize `agents_[numa_node]` such that it holds `current + additional`
/// elements after this call.
/// Returns the size before growing (i.e. the index of the first newly
/// added slot).
uint64_t GrowAgentContainer(size_t additional, size_t numa_node) {
  if (additional == 0) {
    return agents_[numa_node].size();
  }
  auto current = agents_[numa_node].size();
  // Bug fix: the growth check previously compared against size(), i.e.
  // `current + additional < current`, which can only be true on unsigned
  // overflow — so the 1.5x over-allocation below never ran. The intended
  // comparison is against the container's capacity.
  if (current + additional > agents_[numa_node].capacity()) {
    // Over-allocate to amortize repeated growth.
    agents_[numa_node].reserve((current + additional) * 1.5);
  }
  agents_[numa_node].resize(current + additional);
  return current;
}
/// Returns true if an agent with the given uid is stored in this
/// ResourceManager.
bool ContainsAgent(const AgentUid& uid) const {
return uid_ah_map_.Contains(uid);
}
/// Remove all agents
/// NB: This method is not thread-safe! This function invalidates
/// agent references pointing into the ResourceManager. AgentPointer are
/// not affected.
void ClearAgents() {
uid_ah_map_.clear();
for (auto& numa_agents : agents_) {
for (auto* agent : numa_agents) {
delete agent;
}
numa_agents.clear();
}
if (type_index_) {
type_index_->Clear();
}
}
  /// Reorder agents such that agents are distributed to NUMA
  /// nodes. Nearby agents will be moved to the same NUMA node.
  virtual void LoadBalance();

  /// Print NUMA-related diagnostic information.
  void DebugNuma() const;
  /// Adds `agent` to the container of the given NUMA node and registers its
  /// uid in the uid -> AgentHandle map. Takes ownership of `agent`.
  /// NB: This method is not thread-safe! This function might invalidate
  /// agent references pointing into the ResourceManager. AgentPointer are
  /// not affected.
  void AddAgent(Agent* agent,  // NOLINT
                typename AgentHandle::NumaNode_t numa_node = 0) {
    auto uid = agent->GetUid();
    // grow the uid -> handle map on demand (indexed by the uid's index part)
    if (uid.GetIndex() >= uid_ah_map_.size()) {
      uid_ah_map_.resize(uid.GetIndex() + 1);
    }
    agents_[numa_node].push_back(agent);
    // the handle records exactly where the agent was just stored
    uid_ah_map_.Insert(uid,
                       AgentHandle(numa_node, agents_[numa_node].size() - 1));
    if (type_index_) {
      type_index_->Add(agent);
    }
  }
  /// Grows `uid_ah_map_` (and the type index capacity) so it can hold every
  /// AgentUid handed out by the generator so far, with 1.5x head room.
  void ResizeAgentUidMap() {
    auto* agent_uid_generator = Simulation::GetActive()->GetAgentUidGenerator();
    auto highest_idx = agent_uid_generator->GetHighestIndex();
    // head room avoids resizing on every new uid; the floating-point result
    // is implicitly converted back to an integral size on use below
    auto new_size = highest_idx * 1.5 + 1;
    if (highest_idx >= uid_ah_map_.size()) {
      uid_ah_map_.resize(new_size);
    }
    if (type_index_) {
      type_index_->Reserve(new_size);
    }
  }
  /// Called at the end of each simulation iteration.
  /// Decides whether AgentUid defragmentation should be turned on or off,
  /// based on how densely `uid_ah_map_` is populated.
  virtual void EndOfIteration() {
    // fraction of uid_ah_map_ slots holding a live agent
    // NOTE(review): if uid_ah_map_.size() is 0 this divides by zero (double
    // division -> inf/NaN, both watermark tests then fail) — presumably the
    // map is never empty here; confirm.
    double utilization = static_cast<double>(GetNumAgents()) /
                         static_cast<double>(uid_ah_map_.size());
    auto* sim = Simulation::GetActive();
    auto* param = sim->GetParam();
    if (utilization < param->agent_uid_defragmentation_low_watermark) {
      sim->GetAgentUidGenerator()->EnableDefragmentation(&uid_ah_map_);
    } else if (utilization > param->agent_uid_defragmentation_high_watermark) {
      sim->GetAgentUidGenerator()->DisableDefragmentation();
    }
  }
  /// Adds `new_agents` to `agents_[numa_node]`. `offset` specifies
  /// the index at which the first element is inserted. Agents are inserted
  /// consecutively. This method is thread safe only if insertion intervals do
  /// not overlap!
  virtual void AddAgents(typename AgentHandle::NumaNode_t numa_node,
                         uint64_t offset,
                         const std::vector<Agent*>& new_agents) {
    uint64_t i = 0;
    for (auto* agent : new_agents) {
      auto uid = agent->GetUid();
      uid_ah_map_.Insert(uid, AgentHandle(numa_node, offset + i));
      agents_[numa_node][offset + i] = agent;
      i++;
    }
    if (type_index_) {
      // serialize concurrent callers: the type index update is guarded while
      // the slot writes above rely on non-overlapping intervals instead
#pragma omp critical
      for (auto* agent : new_agents) {
        type_index_->Add(agent);
      }
    }
  }
  /// Removes the agent with the given uid.\n
  /// NB: This method is not thread-safe! This function invalidates
  /// agent references pointing into the ResourceManager. AgentPointer are
  /// not affected.
  void RemoveAgent(const AgentUid& uid) {
    // remove from map
    if (uid_ah_map_.Contains(uid)) {
      auto ah = uid_ah_map_[uid];
      uid_ah_map_.Remove(uid);
      // remove from vector using swap-and-pop: the last element is moved
      // into the vacated slot so the container stays contiguous
      auto& numa_agents = agents_[ah.GetNumaNode()];
      Agent* agent = nullptr;
      if (ah.GetElementIdx() == numa_agents.size() - 1) {
        // removing the last element: no swap needed
        agent = numa_agents.back();
        numa_agents.pop_back();
      } else {
        // swap; the moved element's handle must be re-pointed to its new slot
        agent = numa_agents[ah.GetElementIdx()];
        auto* reordered = numa_agents.back();
        numa_agents[ah.GetElementIdx()] = reordered;
        numa_agents.pop_back();
        uid_ah_map_.Insert(reordered->GetUid(), ah);
      }
      if (type_index_) {
        type_index_->Remove(agent);
      }
      delete agent;
    }
  }
  /// Removes the agents with the given uids in parallel.
  // \param uids: one vector for each thread containing one vector for each numa
  // node
  void RemoveAgents(const std::vector<std::vector<AgentUid>*>& uids);

  /// Returns the type index, or nullptr if type indexing is disabled.
  const TypeIndex* GetTypeIndex() const { return type_index_; }
 protected:
  /// Maps an AgentUid to its storage location in `agents_` \n
  AgentUidMap<AgentHandle> uid_ah_map_ = AgentUidMap<AgentHandle>(100u); //!
  /// Pointer container for all agents; outer index is the NUMA node
  std::vector<std::vector<Agent*>> agents_;
  /// Container used during load balancing
  std::vector<std::vector<Agent*>> agents_lb_; //!
  /// Maps a diffusion grid ID to the pointer to the diffusion grid
  std::unordered_map<uint64_t, DiffusionGrid*> diffusion_grids_;
  /// Cached thread/NUMA topology information
  ThreadInfo* thread_info_ = ThreadInfo::GetInstance(); //!
  /// Optional index of agents by type; nullptr when disabled
  TypeIndex* type_index_ = nullptr;

  /// Scratch buffers used by the parallel RemoveAgents implementation.
  struct ParallelRemovalAuxData {
    std::vector<std::vector<uint64_t>> to_right;
    std::vector<std::vector<uint64_t>> not_to_left;
  };
  /// auxiliary data required for parallel agent removal
  ParallelRemovalAuxData parallel_remove_; //!

  friend class SimulationBackup;
  friend std::ostream& operator<<(std::ostream& os, const ResourceManager& rm);
  BDM_CLASS_DEF_NV(ResourceManager, 2);
};
/// Stream a per-NUMA-node size summary of the ResourceManager.
inline std::ostream& operator<<(std::ostream& os, const ResourceManager& rm) {
  os << "\033[1mAgents per numa node\033[0m" << std::endl;
  for (uint64_t node = 0; node < rm.agents_.size(); ++node) {
    os << "numa node " << node << " -> size: " << rm.agents_[node].size()
       << std::endl;
  }
  return os;
}
} // namespace bdm
#endif // CORE_RESOURCE_MANAGER_H_
|
pkzip_fmt_plug.c | /*
* PKZIP patch for john to handle 'old' pkzip passwords (old 'native' format)
*
* Written by Jim Fougeron <jfoug at cox.net> in 2011. No copyright
* is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the
* public domain is deemed null and void, then the software is
* Copyright (c) 2011 Jim Fougeron and it is hereby released to the
* general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*
*/
#include "arch.h"
#if !AC_BUILT
#define HAVE_LIBZ 1 /* legacy build has -lz in LDFLAGS */
#endif
#if HAVE_LIBZ
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pkzip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pkzip);
#else
#include <string.h>
#include <zlib.h>
#include "common.h"
#include "misc.h"
#include "formats.h"
#define USE_PKZIP_MAGIC 1
#include "pkzip.h"
#include "pkzip_inffixed.h" // This file is a data file, taken from zlib
#include "loader.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "PKZIP"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define FORMAT_TAG "$pkzip$"
#define FORMAT_TAG2 "$pkzip2$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_TAG2_LEN (sizeof(FORMAT_TAG2)-1)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define PLAINTEXT_LENGTH 31
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_SIZE (sizeof(PKZ_SALT*))
#define SALT_ALIGN (sizeof(uint32_t))
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 64
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
//#define ZIP_DEBUG 1
//#define ZIP_DEBUG 2
/*
* It is likely that this should be put into the arch.h files for the different systems,
* IF we find a system which operates faster doing the non-table work.
* However, in current testing, it is always faster to use the multiply table. It only
* takes 16kb, and almost always stays in the cache for any system newer than a 386.
*/
#define PKZIP_USE_MULT_TABLE
#if ARCH_LITTLE_ENDIAN
#define KB1 0
#define KB2 3
#else
#define KB1 3
#define KB2 0
#endif
/*
* filename:$pkzip$C*B*[DT*MT{CL*UL*CR*OF*OX}*CT*DL*CS*DA]*$/pkzip$ (deprecated)
* filename:$pkzip2$C*B*[DT*MT{CL*UL*CR*OF*OX}*CT*DL*CS*TC*DA]*$/pkzip2$ (new format, with 2 checksums)
*
* All numeric and 'binary data' fields are stored in hex.
*
* C is the count of hashes present (the array of items, inside the [] C can be 1 to 3.).
* B is number of valid bytes in the checksum (1 or 2). Unix zip is 2 bytes, all others are 1
* ARRAY of data starts here (there will be C array elements)
* DT is a "Data Type enum". This will be 1 2 or 3. 1 is 'partial'. 2 and 3 are full file data (2 is inline, 3 is load from file).
* MT Magic Type enum. 0 is no 'type'. 255 is 'text'. Other types (like MS Doc, GIF, etc), see source.
* NOTE, CL, DL, CRC, OFF are only present if DT != 1
* CL Compressed length of file blob data (includes 12 byte IV).
* UL Uncompressed length of the file.
* CR CRC32 of the 'final' file.
* OF Offset to the PK\x3\x4 record for this file data. If DT==2, then this will be a 0, as it is not needed, all of the data is already included in the line.
* OX Additional offset (past OF), to get to the zip data within the file.
* END OF 'optional' fields.
 * CT Compression type (0 or 8) 0 is stored, 8 is deflated.
* DL Length of the DA data.
* CS Checksum from crc32.
* TC Checksum from timestamp
* DA This is the 'data'. It will be hex data if DT==1 or 2. If DT==3, then it is a filename (name of the .zip file).
* END of array items.
* The format string will end with $/pkzip$
*
* NOTE, after some code testing, it has come to show, that the 'magic' may not be needed, or very useful. The problem with it, is IF the file
* ends up NOT starting with any of the magic values, then we will have a false negative, and NEVER be able to crack the zip's password. For now
* we have a #define (right before the #include "pkzip.h"). If that define is uncommented, then pkzip format will be built with magic logic.
* However, right now it is not being built that way.
*
*/
static struct fmt_tests tests[] = {
/* compression of a perl file. We have the same password, same file used twice in a row (pkzip, 1 byte checksum). NOTE, pkzip uses random IV, so both encrypted blobs are different */
{"\
$pkzip$1*1*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*194883130e4c7419bd735c53dec36f0c4b6de6daefea0f507d67ff7256a49b5ea93ccfd9b12f2ee99053ee0b1c9e1c2b88aeaeb6bd4e60094a1ea118785d4ded6dae94\
cade41199330f4f11b37cba7cda5d69529bdfa43e2700ba517bd2f7ff4a0d4b3d7f2559690ec044deb818c44844d6dd50adbebf02cec663ae8ebb0dde05d2abc31eaf6de36a2fc19fda65dd6a7e449f669d1f8c75e9daa0a3f7b\
e8feaa43bf84762d6dbcc9424285a93cedfa3a75dadc11e969065f94fe3991bc23c9b09eaa5318aa29fa02e83b6bee26cafec0a5e189242ac9e562c7a5ed673f599cefcd398617*$/pkzip$", "password" },
{"\
$pkzip$1*1*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*581f798527109cbadfca0b3318435a000be84366caf9723f841a2b13e27c2ed8cdb5628705a98c3fbbfb34552ed498c51a172641bf231f9948bca304a6be2138ab718f\
6a5b1c513a2fb80c49030ff1a404f7bd04dd47c684317adea4107e5d70ce13edc356c60bebd532418e0855428f9dd582265956e39a0b446a10fd8b7ffb2b4af559351bbd549407381c0d2acc270f3bcaffb275cbe2f628cb09e2\
978e87cd023d4ccb50caaa92b6c952ba779980d65f59f664dde2451cc456d435188be59301a5df1b1b4fed6b7509196334556c44208a9d7e2d9e237f591d6c9fc467b408bf0aaa*$/pkzip$", "password" },
/* Now the same file, compressed twice, using unix zip (info-zip), with 2 byte checksums */
{"\
$pkzip$1*2*2*0*e4*1c5*eda7a8de*0*47*8*e4*4bb6*436c9ffa4328870f6272349b591095e1b1126420c3041744650282bc4f575d0d4a5fc5fb34724e6a1cde742192387b9ed749ab5c72cd6bb0206f102e9216538f095fb7\
73661cfde82c2e2a619332998124648bf4cd0da56279f0c297567d9b5d684125ee92920dd513fd18c27afba2a9633614f75d8f8b9a14095e3fafe8165330871287222e6681dd9c0f830cf5d464457b257d0900eed29107fad8af\
3ac4f87cf5af5183ff0516ccd9aeac1186006c8d11b18742dfb526aadbf2906772fbfe8fb18798967fd397a724d59f6fcd4c32736550986d227a6b447ef70585c049a1a4d7bf25*$/pkzip$", "password" },
{"\
$pkzip$1*2*2*0*e4*1c5*eda7a8de*0*47*8*e4*4bb6*436c9ffa4328870f6272349b591095e1b1126420c3041744650282bc4f575d0d4a5fc5fb34724e6a1cde742192387b9ed749ab5c72cd6bb0206f102e9216538f095fb7\
73661cfde82c2e2a619332998124648bf4cd0da56279f0c297567d9b5d684125ee92920dd513fd18c27afba2a9633614f75d8f8b9a14095e3fafe8165330871287222e6681dd9c0f830cf5d464457b257d0900eed29107fad8af\
3ac4f87cf5af5183ff0516ccd9aeac1186006c8d11b18742dfb526aadbf2906772fbfe8fb18798967fd397a724d59f6fcd4c32736550986d227a6b447ef70585c049a1a4d7bf25*$/pkzip$", "password"},
/* now a pkzip archive, with 3 files, 1 byte checksum */
{"\
$pkzip$3*1*1*0*8*24*4001*8986ec4d693e86c1a42c1bd2e6a994cb0b98507a6ec937fe0a41681c02fe52c61e3cc046*1*0*8*24*4003*a087adcda58de2e14e73db0043a4ff0ed3acc6a9aee3985d7cb81d5ddb32b840ea20\
57d9*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*89a792af804bf38e31fdccc8919a75ab6eb75d1fd6e7ecefa3c5b9c78c3d50d656f42e582af95882a38168a8493b2de5031bb8b39797463cb4769a955a2ba72abe48ee75b103\
f93ef9984ae740559b9bd84cf848d693d86acabd84749853675fb1a79edd747867ef52f4ee82435af332d43f0d0bb056c49384d740523fa75b86a6d29a138da90a8de31dbfa89f2f6b0550c2b47c43d907395904453ddf42a665\
b5f7662de170986f89d46d944b519e1db9d13d4254a6b0a5ac02b3cfdd468d7a4965e4af05699a920e6f3ddcedb57d956a6b2754835b14e174070ba6aec4882d581c9f30*$/pkzip$", "3!files"},
/* following are from CMIYC 2012 */
{"$pkzip$1*1*2*0*163*2b5*cd154083*0*26*8*163*cd15*d6b094794b40116a8b387c10159225d776f815b178186e51faf16fa981fddbffdfa22f6c6f32d2f81dab35e141f2899841991f3cb8d53f8ee1f1d85657f7c7a82ebb2d63182803c6beee00e0bf6c72edeeb1b00dc9f07f917bb8544cc0e96ca01503cd0fb6632c296cebe3fb9b64543925daae6b7ea95cfd27c42f6f3465e0ab2c812b9aeeb15209ce3b691f27ea43a7a77b89c2387e31c4775866a044b6da783af8ddb72784ccaff4d9a246db96484e865ea208ade290b0131b4d2dd21f172693e6b5c90f2eb9b67572b55874b6d3a78763212b248629e744c07871a6054e24ef74b6d779e44970e1619df223b4e5a72a189bef40682b62be6fb7f65e087ca6ee19d1ebfc259fa7e3d98f3cb99347689f8360294352accffb146edafa9e91afba1f119f95145738ac366b332743d4ff40d49fac42b8758c43b0af5b60b8a1c63338359ffbff432774f2c92de3f8c49bd4611e134db98e6a3f2cfb148d2b20f75abab6*$/pkzip$", "passwort"},
{"$pkzip$1*1*2*0*163*2b6*46abc149*0*28*8*163*46ab*0f539b23b761a347a329f362f7f1f0249515f000404c77ec0b0ffe06f29140e8fa3e8e5a6354e57f3252fae3d744212d4d425dc44389dd4450aa9a4f2f3c072bee39d6ac6662620812978f7ab166c66e1acb703602707ab2da96bb28033485ec192389f213e48eda8fc7d9dad1965b097fafebfda6703117db90e0295db9a653058cb28215c3245e6e0f6ad321065bf7b8cc5f66f6f2636e0d02ea35a6ba64bbf0191c308098fd836e278abbce7f10c3360a0a682663f59f92d9c2dcfc87cde2aae27ea18a14d2e4a0752b6b51e7a5c4c8c2bab88f4fb0aba27fb20e448655021bb3ac63752fdb01e6b7c99f9223f9e15d71eb1bd8e323f522fc3da467ff0aae1aa17824085d5d6f1cdfc9c7c689cd7cb057005d94ba691f388484cfb842c8775baac220a5490ed945c8b0414dbfc4589254b856aade49f1aa386db86e9fc87e6475b452bd72c5e2122df239f8c2fd462ca54c1a5bddac36918c5f5cf0cc94aa6ee820*$/pkzip$", "Credit11"},
{"$pkzip$1*1*2*0*163*2b6*46abc149*0*26*8*163*46ab*7ea9a6b07ddc9419439311702b4800e7e1f620b0ab8535c5aa3b14287063557b176cf87a800b8ee496643c0b54a77684929cc160869db4443edc44338294458f1b6c8f056abb0fa27a5e5099e19a07735ff73dc91c6b20b05c023b3ef019529f6f67584343ac6d86fa3d12113f3d374b047efe90e2a325c0901598f31f7fb2a31a615c51ea8435a97d07e0bd4d4afbd228231dbc5e60bf1116ce49d6ce2547b63a1b057f286401acb7c21afbb673f3e26bc1b2114ab0b581f039c2739c7dd0af92c986fc4831b6c294783f1abb0765cf754eada132df751cf94cad7f29bb2fec0c7c47a7177dea82644fc17b455ba2b4ded6d9a24e268fcc4545cae73b14ceca1b429d74d1ebb6947274d9b0dcfb2e1ac6f6b7cd2be8f6141c3295c0dbe25b65ff89feb62cb24bd5be33853b88b8ac839fdd295f71e17a7ae1f054e27ba5e60ca03c6601b85c3055601ce41a33127938440600aaa16cfdd31afaa909fd80afc8690aaf*$/pkzip$", "7J0rdan!!"},
/* CMIYC 2013 "pro" hard hash */
{"$pkzip$1*2*2*0*6b*73*8e687a5b*0*46*8*6b*0d9d*636fedc7a78a7f80cda8542441e71092d87d13da94c93848c230ea43fab5978759e506110b77bd4bc10c95bc909598a10adfd4febc0d42f3cd31e4fec848d6f49ab24bb915cf939fb1ce09326378bb8ecafde7d3fe06b6013628a779e017be0f0ad278a5b04e41807ae9fc*$/pkzip$", "c00rslit3!"},
/* http://corkami.googlecode.com/files/ChristmasGIFts.zip (fixed with 2 byte checksums from timestamp, using new $pkzip2$ type) */
{"$pkzip2$3*2*1*2*8*c0*7224*72f6*6195f9f3401076b22f006105c4323f7ac8bb8ebf8d570dc9c7f13ddacd8f071783f6bef08e09ce4f749af00178e56bc948ada1953a0263c706fd39e96bb46731f827a764c9d55945a89b952f0503747703d40ed4748a8e5c31cb7024366d0ef2b0eb4232e250d343416c12c7cbc15d41e01e986857d320fb6a2d23f4c44201c808be107912dbfe4586e3bf2c966d926073078b92a2a91568081daae85cbcddec75692485d0e89994634c71090271ac7b4a874ede424dafe1de795075d2916eae*1*6*8*c0*26ee*461b*944bebb405b5eab4322a9ce6f7030ace3d8ec776b0a989752cf29569acbdd1fb3f5bd5fe7e4775d71f9ba728bf6c17aad1516f3aebf096c26f0c40e19a042809074caa5ae22f06c7dcd1d8e3334243bca723d20875bd80c54944712562c4ff5fdb25be5f4eed04f75f79584bfd28f8b786dd82fd0ffc760893dac4025f301c2802b79b3cb6bbdf565ceb3190849afdf1f17688b8a65df7bc53bc83b01a15c375e34970ae080307638b763fb10783b18b5dec78d8dfac58f49e3c3be62d6d54f9*2*0*2a*1e*4a204eab*ce8*2c*0*2a*4a20*7235*6b6e1a8de47449a77e6f0d126b217d6b2b72227c0885f7dc10a2fb3e7cb0e611c5c219a78f98a9069f30*$/pkzip2$", "123456"},
{NULL}
};
/* These static fields are used in the crypt_all loop and in cmp_all/cmp_one,  */
/* where we perform the pkzip 'checksum' checking. If we do get a 'hit', then  */
/* that pass & salt pair is checked fully within cmp_exact, where it gets      */
/* inflated and checked (possibly a 'sample TEXT record' is done first, as a   */
/* quick check).                                                               */
static char (*saved_key)[PLAINTEXT_LENGTH + 1]; /* candidate passwords */
static u32 *K12;       /* 3 running pkzip key values per candidate */
static PKZ_SALT *salt; /* active salt, installed by set_salt() */
static u8 *chk;        /* per-candidate result of the checksum test */
static int dirty = 1;  /* set_key() was called since the last crypt_all() */
#if USE_PKZIP_MAGIC
static ZIP_SIGS SIGS[256]; /* file-type magic signatures, filled in init() */
#endif
#ifdef PKZIP_USE_MULT_TABLE
static u8 mult_tab[16384];
/* FIX: argument and expansion fully parenthesized so the macro behaves like a
 * function call regardless of the operators surrounding the invocation. */
#define PKZ_MULT(b,w) ((b)^mult_tab[(u16)((w).u)>>2])
#else
/* FIX: 'static' added — a plain C99 'inline' definition emits no out-of-line
 * copy, which can fail to link whenever a call is not inlined. */
static inline u8 PKZ_MULT(u8 b, MY_WORD w) { u16 t = w.u|2; return b ^ (u8)(((u16)(t*(t^1))>>8)); }
#endif

extern struct fmt_main fmt_pkzip;
static const char *ValidateZipContents(FILE *in, long offset, u32 offex, int len, u32 crc);
/* Since the pkzip format textual representation is pretty complex, with multiple */
/* 'optional' sections, we have a VERY complete valid. Valid will make SURE that */
/* the format is completely valid. Thus, there is little or no error checking later */
/* in the rest of the code. It 'should' not be needed, and is done here. There is */
/* a little error checking later in the file, for some of the file opening stuff, */
/* since the file can change from the time of this 'valid' call, until when the data */
/* is actually read from the file. */
/* */
/* NOTE, we may want to later make a 'prepare()' function, and do all file loading */
/* there, so that we have a 'complete' format line, with the zip data contained. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	c8 *p, *cp, *cpkeep;
	int cnt, ret=0;
	u64 data_len;
	u32 crc;
	FILE *in;
	/* FIX: initialized. Three 'goto Bail' sites below set no message, and
	 * the ZIP_DEBUG report at Bail would read an indeterminate pointer. */
	const char *sFailStr = "Truncated or malformed pkzip hash line";
	long offset;
	u32 offex;
	int type;
	u64 complen = 0;
	int type2 = 0;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) {
		if (!strncmp(ciphertext, FORMAT_TAG2, FORMAT_TAG2_LEN))
			type2 = 1;
		else
			return ret;
	}

	/* work on a private copy: strtokm() writes into the buffer */
	cpkeep = strdup(ciphertext);
	cp = cpkeep;

	p = &cp[FORMAT_TAG_LEN];
	if (type2)
		++p;
	if ((cp = strtokm(p, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
		sFailStr = "Out of data, reading count of hashes field";
		goto Bail;
	}
	sscanf(cp, "%x", &cnt);
	if (cnt < 1 || cnt > MAX_PKZ_FILES) {
		sFailStr = "Count of hashes field out of range";
		goto Bail;
	}
	if ((cp = strtokm(NULL, "*")) == NULL || cp[0] < '0' || cp[0] > '2' || cp[1]) {
		sFailStr = "Number of valid hash bytes empty or out of range";
		goto Bail;
	}

	while (cnt--) {
		if ((cp = strtokm(NULL, "*")) == NULL || cp[0]<'1' || cp[0]>'3' || cp[1]) {
			sFailStr = "Invalid data enumeration type";
			goto Bail;
		}
		type = cp[0] - '0';
		if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
			sFailStr = "Invalid type enumeration";
			goto Bail;
		}
		if (type > 1) {
			/* types 2 and 3 carry the optional CL/UL/CR/OF/OX fields */
			if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
				sFailStr = "Invalid compressed length";
				goto Bail;
			}
			sscanf(cp, "%"PRIx64, &complen);
			if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
				sFailStr = "Invalid data length value";
				goto Bail;
			}
			if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
				sFailStr = "Invalid CRC value";
				goto Bail;
			}
			sscanf(cp, "%x", &crc);
			if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
				sFailStr = "Invalid offset length";
				goto Bail;
			}
			sscanf(cp, "%lx", &offset);
			if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
				sFailStr = "Invalid offset length";
				goto Bail;
			}
			sscanf(cp, "%x", &offex);
		}
		if ((cp = strtokm(NULL, "*")) == NULL || (cp[0] != '0' && cp[0] != '8') || cp[1]) {
			sFailStr = "Compression type enumeration";
			goto Bail;
		}
		if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
			sFailStr = "Invalid data length value";
			goto Bail;
		}
		sscanf(cp, "%"PRIx64, &data_len);
		if ((cp = strtokm(NULL, "*")) == NULL || !ishexlc(cp) || strlen(cp) != 4) {
			sFailStr = "invalid checksum value";
			goto Bail;
		}
		if (type2) {
			/* $pkzip2$ lines carry a second (timestamp) checksum */
			if ((cp = strtokm(NULL, "*")) == NULL || !ishexlc(cp) || strlen(cp) != 4) {
				sFailStr = "invalid checksum2 value";
				goto Bail;}
		}
		if ((cp = strtokm(NULL, "*")) == NULL) goto Bail;
		if (type > 1) {
			if (type == 3) {
				/* DT==3: the field is a file name; the data lives in the .zip itself */
				if ( strlen(cp) != data_len) {
					/* FIX: message was a copy-paste of the checksum error */
					sFailStr = "invalid zip file name length";
					goto Bail;
				}
				in = fopen(cp, "rb"); /* have to open in bin mode for OS's where this matters, DOS/Win32 */
				if (!in) {
					/* this error is listed, even if not in pkzip debugging mode. */
					/* But not if we're just reading old pot lines */
					if (!ldr_in_pot)
						fprintf(stderr, "Error loading a pkzip hash line. The ZIP file '%s' could NOT be found\n", cp);
					MEM_FREE(cpkeep); /* FIX: was leaked on this path */
					return 0;
				}
				sFailStr = ValidateZipContents(in, offset, offex, complen, crc);
				if (*sFailStr) {
					/* this error is listed, even if not in pkzip debugging mode. */
					fprintf(stderr, "pkzip validation failed [%s] Hash is %s\n", sFailStr, ciphertext);
					fclose(in);
					MEM_FREE(cpkeep); /* FIX: was leaked on this path */
					return 0;
				}
				fseek(in, offset+offex, SEEK_SET);
				if (complen < 16*1024) {
					/* simply load the whole blob */
					void *tbuf = mem_alloc(complen);
					if (fread(tbuf, 1, complen, in) != complen) {
						MEM_FREE(tbuf);
						fclose(in);
						MEM_FREE(cpkeep); /* FIX: was leaked on this path */
						return 0;
					}
					data_len = complen;
					MEM_FREE(tbuf);
				}
				fclose(in);
			} else {
				/* 'inline' data. */
				if (complen != data_len) {
					sFailStr = "length of full data does not match the salt len";
					goto Bail;
				}
				if (!ishexlc(cp) || strlen(cp) != data_len<<1) {
					sFailStr = "invalid inline data";
					goto Bail;
				}
			}
		} else {
			if (!ishexlc(cp) || strlen(cp) != data_len<<1) {
				sFailStr = "invalid partial data";
				goto Bail;
			}
		}
	}
	if ((cp = strtokm(NULL, "*")) == NULL) goto Bail;
	if (strtokm(NULL, "") != NULL) goto Bail;
	if (type2) ret = !strcmp(cp, "$/pkzip2$");
	else ret = !strcmp(cp, "$/pkzip$");

Bail:;
#ifdef ZIP_DEBUG
	if (!ret) fprintf(stderr, "pkzip validation failed [%s] Hash is %.64s\n", sFailStr, ciphertext);
#endif
	MEM_FREE(cpkeep);
	return ret;
}
/* Check that the zip blob referenced by a type-3 (load-from-file) hash line
 * really is the encrypted local-file entry the hash describes. Seeks to
 * `offset`, parses the PK\x3\x4 local file header, and cross-checks the
 * recorded CRC/length/offsets. Returns "" on success, or a static error
 * message on failure. The caller owns `fp` and its position afterwards. */
static const char *ValidateZipContents(FILE *fp, long offset, u32 offex, int _len, u32 _crc)
{
	u32 id;
	u16 version, flags, method, modtm, moddt, namelen, exlen;
	u32 crc, complen, uncomplen;

	if (fseek(fp, offset, SEEK_SET) != 0)
		return "Not able to seek to specified offset in the .zip file, to read the zip blob data.";

	id = fget32LE(fp);
	if (id != 0x04034b50U) /* "PK\x3\x4" local file header signature */
		return "Compressed zip file offset does not point to start of zip blob";

	/* Ok, see if this IS the correct file blob. */
	/* NOTE: fields are read strictly in on-disk header order. */
	version = fget16LE(fp);
	flags = fget16LE(fp);
	method = fget16LE(fp);
	modtm = fget16LE(fp);
	moddt = fget16LE(fp);
	crc = fget32LE(fp);
	complen = fget32LE(fp);
	uncomplen = fget32LE(fp);
	namelen = fget16LE(fp);
	exlen = fget16LE(fp);

	/* unused vars. */
	(void)uncomplen;
	(void)modtm;
	(void)moddt;

	/* Even if we 'miss', we keep walking back. We 'can' miss if the CRC of file, or some other */
	/* binary data happens to have the 0x04034b50 signature, thus giving us a false local header hit. */
	/* (flags & 1) is the 'encrypted' bit; 30 is the fixed local header size. */
	if (_crc == crc && _len == complen && (0x14 == version || 0xA == version) && (flags & 1) && (method == 8 || method == 0) && offex==30+namelen+exlen)
		return "";
	return "We could NOT find the internal zip data in this ZIP file";
}
/* Copy `len` bytes — which may include embedded NUL bytes — into 'tiny'
 * allocated memory. Used for magic signatures containing zeros, where
 * str_alloc_copy() would stop at the first NUL. The returned buffer belongs
 * to the tiny allocator and must not be freed by the caller. */
static u8 *buf_copy (char *p, int len)
{
	u8 *op = mem_alloc_tiny(len, MEM_ALIGN_NONE);
	memcpy(op, p, len);
	return op;
}
/* Format init: scales keys-per-crypt for OpenMP, allocates the per-candidate
 * buffers (saved_key / K12 / chk), precomputes the keystream multiply table,
 * and (if built with USE_PKZIP_MAGIC) fills the SIGS file-type signature
 * table indexed by the hash line's MT 'magic type' byte (255 = 'text'). */
static void init(struct fmt_main *self)
{
#ifdef PKZIP_USE_MULT_TABLE
	unsigned short n=0;
#endif
#ifdef _OPENMP
	int omp_t;

	/* scale min/max keys per crypt by thread count (and OMP_SCALE for max) */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	K12 = mem_calloc(sizeof(*K12) * 3, self->params.max_keys_per_crypt);
	chk = mem_calloc(sizeof(*chk), self->params.max_keys_per_crypt);

	/*
	 * Precompute the multiply mangling, within several parts of the hash. There is a pattern,
	 * 64k entries long. However the exact same value is produced 4 times in a row, every
	 * time. Thus, we can build a 16k wide array, and then access the array using this
	 * ((val&0xFFFF) >> 2) This is faster on all current HW, since the 16kb array access
	 * (and the and/shift) is faster than performing the whole mult, 2 shifts, 2 adds and
	 * an and (if the compiler can optimize it to that)
	 *
	 * There is a # define at the top of this file that turns this OFF. if that define is
	 * not set, then these mult's will be done in the crypt_all and decrypt functions
	 */
#ifdef PKZIP_USE_MULT_TABLE
	for (n = 0; n < 16384; n++)
		mult_tab[n] = (((unsigned)(n*4+3) * (n*4+2)) >> 8) & 0xff;
#endif

#if USE_PKZIP_MAGIC
	//static char *MagicTypes[]= { "", "DOC", "XLS", "DOT", "XLT", "EXE", "DLL", "ZIP", "BMP", "DIB", "GIF", "PDF", "GZ", "TGZ", "BZ2", "TZ2", "FLV", "SWF", "MP3", NULL };
	//static int MagicToEnum[] = {0, 1, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 10, 11, 0};
	// decent sources of these:
	// http://www.garykessler.net/library/file_sigs.html
	// http://en.wikipedia.org/wiki/List_of_file_signatures
	// http://toorcon.techpathways.com/uploads/headersig.txt
	// not available, 2012-12-28)
	// archive.org still has a version:
	// http://web.archive.org/web/20110725085828/http://toorcon.techpathways.com/uploads/headersig.txt
	// there are many more.
	// NOTE: signatures with embedded NUL bytes go through buf_copy(), the
	// NUL-free ones through str_alloc_copy().

	//case 1: // DOC/XLS
	SIGS[1].magic_signature[0] = (u8*)str_alloc_copy("\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1");
	SIGS[1].magic_sig_len[0] = 8;
	SIGS[1].magic_signature[1] = buf_copy("\x50\x4B\x03\x04\x14\x00\x06\x00\x08", 10); // a .zip file 'sort of'
	SIGS[1].magic_sig_len[1] = 9;
	SIGS[1].magic_signature[2] = buf_copy("\x09\x04\x06\x00\x00\x00\x10\x00\xF6\x05\x5C\x00", 13); // older XLS format (office 95)
	SIGS[1].magic_sig_len[2] = 12;
	SIGS[1].magic_signature[3] = buf_copy("\x09\x02\x06\x00\x00\x00\x10\x00\xB9\x04\x5C\x00", 13); // older XLS v2
	SIGS[1].magic_sig_len[3] = 12;
	SIGS[1].magic_signature[4] = buf_copy("\x50\x4B\x03\x04\x14\x00\x00\x00\x00\x00", 11); //DOC Star Writer 6.0
	SIGS[1].magic_sig_len[4] = 10;
	SIGS[1].magic_signature[5] = buf_copy("\x31\xBE\x00\x00\x00\xAB\x00\x00", 9); //DOC MS Word for DOS v6 File
	SIGS[1].magic_sig_len[5] = 8;
	SIGS[1].magic_signature[6] = (u8*)str_alloc_copy("\x12\x34\x56\x78\x90\xFF"); //DOC MS Word 6.0 File
	SIGS[1].magic_sig_len[6] = 6;
	SIGS[1].magic_signature[7] = (u8*)str_alloc_copy("\x7F\xFE\x34\x0A"); //MS Word File
	SIGS[1].magic_sig_len[7] = 4;
	SIGS[1].magic_count = 8;
	SIGS[1].max_len = 12;
	//case 2: // Win32/DOS exe file MZ
	SIGS[2].magic_signature[0] = (u8*)str_alloc_copy("MZ");
	SIGS[2].magic_sig_len[0] = 2;
	SIGS[2].magic_count = 1;
	SIGS[2].max_len = 2;
	//case 3: // PKZIP
	SIGS[3].magic_signature[0] = (u8*)str_alloc_copy("\x50\x4B\x03\x04");
	SIGS[3].magic_sig_len[0] = 4;
	SIGS[3].magic_count = 1;
	SIGS[3].max_len = 4;
	//case 4: // BMP
	SIGS[4].magic_signature[0] = (u8*)str_alloc_copy("BM");
	SIGS[4].magic_sig_len[0] = 2;
	SIGS[4].magic_count = 1;
	SIGS[4].max_len = 2;
	//case 5: // GIF
	SIGS[5].magic_signature[0] = (u8*)str_alloc_copy("GIF87a");
	SIGS[5].magic_sig_len[0] = 6;
	SIGS[5].magic_signature[1] = (u8*)str_alloc_copy("GIF89a");
	SIGS[5].magic_sig_len[1] = 6;
	SIGS[5].magic_count = 2;
	SIGS[5].max_len = 6;
	//case 6: // PDF
	SIGS[6].magic_signature[0] = (u8*)str_alloc_copy("%PDF");
	SIGS[6].magic_sig_len[0] = 4;
	SIGS[6].magic_count = 1;
	SIGS[6].max_len = 4;
	//case 7: // GZ
	SIGS[7].magic_signature[0] = (u8*)str_alloc_copy("\x1F\x8B\x08");
	SIGS[7].magic_sig_len[0] = 3;
	SIGS[7].magic_count = 1;
	SIGS[7].max_len = 3;
	//case 8: // BZ2 (there is a 'magic' pi, but byte 4 is 1 to 9, so skip the 'pi')
	SIGS[8].magic_signature[0] = (u8*)str_alloc_copy("BZh");
	SIGS[8].magic_sig_len[0] = 3;
	SIGS[8].magic_signature[1] = (u8*)str_alloc_copy("BZ0");
	SIGS[8].magic_sig_len[1] = 3;
	SIGS[8].magic_count = 2;
	SIGS[8].max_len = 3;
	//case 9: // FLV
	SIGS[9].magic_signature[0] = (u8*)str_alloc_copy("FLV\x01");
	SIGS[9].magic_sig_len[0] = 4;
	SIGS[9].magic_count = 1;
	SIGS[9].max_len = 4;
	//case 10: // SWF
	SIGS[10].magic_signature[0] = (u8*)str_alloc_copy("FWS");
	SIGS[10].magic_sig_len[0] = 3;
	SIGS[10].magic_signature[1] = (u8*)str_alloc_copy("CWS");
	SIGS[10].magic_sig_len[1] = 3;
	SIGS[10].magic_signature[2] = (u8*)str_alloc_copy("ZWS");
	SIGS[10].magic_sig_len[2] = 3;
	SIGS[10].magic_count = 3;
	SIGS[10].max_len = 3;
	//case 11: // MP3
	SIGS[11].magic_signature[0] = (u8*)str_alloc_copy("ID3");
	SIGS[11].magic_sig_len[0] = 3;
	SIGS[11].magic_count = 1;
	SIGS[11].max_len = 3;

	SIGS[255].max_len = 64;
#endif
}
/* Counterpart to init(): release the per-candidate buffers. */
static void done(void)
{
	/* freed in allocation order; the three buffers are independent */
	MEM_FREE(saved_key);
	MEM_FREE(K12);
	MEM_FREE(chk);
}
/* Install the salt for subsequent crypt_all() calls. `_salt` points at a
 * PKZ_SALT* (dyna_salt indirection). */
static void set_salt(void *_salt)
{
	salt = *((PKZ_SALT**)_salt);
	if (salt->H[0].h && salt->H[1].h && salt->H[2].h)
		return;
	// we 'late' fixup the salt.
	/* The up-to-3 data blobs sit back to back in zip_data, with one spare
	 * byte between consecutive blobs (offsets +0, +1, +2 mirror the layout
	 * written by get_salt()). */
	salt->H[0].h = &salt->zip_data[0];
	salt->H[1].h = &salt->zip_data[1+salt->H[0].datlen];
	salt->H[2].h = &salt->zip_data[2+salt->H[0].datlen+salt->H[1].datlen];
}
/* Parse one hash line into a dyna_salt PKZ_SALT blob. Returns a pointer to a
 * static buffer holding the PKZ_SALT* (JtR dyna_salt convention), or 0 on
 * file I/O failure for type-3 (load-from-zip) entries.
 * FIX: the error paths used to free only `cpalloc`, leaking `salt` and any
 * already-loaded H[] blobs; they now funnel through a cleanup label. */
static void *get_salt(char *ciphertext)
{
	/* NOTE, almost NO error checking at all in this function. Proper error checking done in valid() */
	static union alignment {
		unsigned char c[8];
		uint32_t a[1];
	} a;
	unsigned char *salt_p = a.c;
	PKZ_SALT *salt, *psalt;
	long offset=0;
	char *H[3] = {0,0,0};
	long ex_len[3] = {0,0,0};
	u32 offex;
	size_t i, j;
	c8 *p, *cp, *cpalloc = (char*)mem_alloc(strlen(ciphertext)+1);
	int type2 = 0;

	/* Needs word align on REQ_ALIGN systems. May crash otherwise (in the sscanf) */
	salt = mem_calloc(1, sizeof(PKZ_SALT));

	cp = cpalloc;
	strcpy(cp, ciphertext);
	if (!strncmp(cp, FORMAT_TAG, FORMAT_TAG_LEN))
		p = &cp[FORMAT_TAG_LEN];
	else {
		p = &cp[FORMAT_TAG2_LEN];
		type2 = 1;
	}
	cp = strtokm(p, "*");
	sscanf(cp, "%x", &(salt->cnt));
	cp = strtokm(NULL, "*");
	sscanf(cp, "%x", &(salt->chk_bytes));
	for (i = 0; i < salt->cnt; ++i) {
		int data_enum;
		cp = strtokm(NULL, "*");
		data_enum = *cp - '0';
		cp = strtokm(NULL, "*");
#if USE_PKZIP_MAGIC
		{
			// mingw can't handle %hhx. Use 'normal' %x and assign back to uint_8 var
			unsigned jnk;
			sscanf(cp, "%x", &jnk);
			salt->H[i].magic = (unsigned char)jnk;
		}
		salt->H[i].pSig = &SIGS[salt->H[i].magic];
#endif
		if (data_enum > 1) {
			/* optional CL/UL/CR/OF/OX fields (full-file entries) */
			cp = strtokm(NULL, "*");
			sscanf(cp, "%"PRIx64, &(salt->compLen));
			cp = strtokm(NULL, "*");
			sscanf(cp, "%"PRIx64, &(salt->deCompLen));
			cp = strtokm(NULL, "*");
			sscanf(cp, "%x", &(salt->crc32));
			cp = strtokm(NULL, "*");
			sscanf(cp, "%lx", &offset);
			cp = strtokm(NULL, "*");
			sscanf(cp, "%x", &offex);
		}
		cp = strtokm(NULL, "*");
		sscanf(cp, "%x", &(salt->H[i].compType));
		cp = strtokm(NULL, "*");
		sscanf(cp, "%"PRIx64, &(salt->H[i].datlen));
		cp = strtokm(NULL, "*");

		/* 2-byte checksum, given as 4 lowercase hex digits */
		for (j = 0; j < 4; ++j) {
			salt->H[i].c <<= 4;
			salt->H[i].c |= atoi16[ARCH_INDEX(cp[j])];
		}
		if (type2) {
			cp = strtokm(NULL, "*");
			for (j = 0; j < 4; ++j) {
				salt->H[i].c2 <<= 4;
				salt->H[i].c2 |= atoi16[ARCH_INDEX(cp[j])];
			}
		} else
			salt->H[i].c2 = salt->H[i].c; // fake out 2nd hash, by copying first hash
		cp = strtokm(NULL, "*");
		if (data_enum > 1) {
			/* if 2 or 3, we have the FULL zip blob for decrypting. */
			if (data_enum == 3) {
				/* read from file. */
				FILE *fp;
				fp = fopen(cp, "rb");
				if (!fp) {
					fprintf(stderr, "Error opening file for pkzip data: %s\n", cp);
					goto error;
				}
				fseek(fp, offset+offex, SEEK_SET);
				if (salt->compLen < 16*1024) {
					/* simply load the whole blob */
					ex_len[i] = salt->compLen;
					H[i] = mem_alloc(salt->compLen);
					if (fread(H[i], 1, salt->compLen, fp) != salt->compLen) {
						fprintf(stderr, "Error reading zip file for pkzip data: %s\n", cp);
						fclose(fp);
						goto error;
					}
					fclose(fp);
					salt->H[i].datlen = salt->compLen;
				}
				else {
					/* Only load a small part (to be used in crypt_all), and set the filename in */
					/* the salt->fname string, so that cmp_all can open the file, and buffered */
					/* read the zip data only when it 'needs' it. */
					strnzcpy(salt->fname, (const char *)cp, sizeof(salt->fname));
					salt->offset = offset+offex;
					ex_len[i] = 384;
					H[i] = mem_alloc(384);
					if (fread(H[i], 1, 384, fp) != 384) {
						fprintf(stderr, "Error reading zip file for pkzip data: %s\n", cp);
						fclose(fp);
						goto error;
					}
					fclose(fp);
					salt->H[i].datlen = 384;
				}
			} else {
				/* inline hex blob */
				ex_len[i] = salt->compLen;
				H[i] = mem_alloc(salt->compLen);
				for (j = 0; j < salt->H[i].datlen; ++j)
					H[i][j] = (atoi16[ARCH_INDEX(cp[j*2])]<<4) + atoi16[ARCH_INDEX(cp[j*2+1])];
			}

			/* we also load this into the 'building' salt */
			salt->compType = salt->H[i].compType;

			/* Now, set the 'is full zip' flag, so we later process as a zip file. */
			salt->H[i].full_zip = 1;
			salt->full_zip_idx = i;
		} else {
			/* partial data: just the hex-encoded snippet */
			ex_len[i] = salt->H[i].datlen;
			H[i] = mem_alloc(salt->H[i].datlen);
			for (j = 0; j < salt->H[i].datlen; ++j)
				H[i][j] = (atoi16[ARCH_INDEX(cp[j*2])]<<4) + atoi16[ARCH_INDEX(cp[j*2+1])];
		}
	}
	MEM_FREE(cpalloc);

	// Ok, we want to add some 'logic' to remove the magic testing, except for specific cases.
	//  If the only file blobs we have are stored, and long blobs, then we want magic (3 file, 2 byte checksum does not need magic).
	//  A single 1 byte file, even if deflated, we want to keep magic. (possibly).
	j = 0;
	for (i = 0; i < salt->cnt; ++i) {
		if (salt->H[i].compType == 8) {
			if (salt->cnt == 1 && salt->chk_bytes == 1)
				j += 10;
			else
				break;
		}
		j += 1;
	}
	// ok, if j == 1, then we 'might' want to use magic. Otherwise, we want to 'clear' all magic values.
	if (j >= 20)
		j = 0;
	if (j && salt->chk_bytes == 2 && salt->cnt > 1)
		j = 0;  // we do not need to use magic, on 2 or 3 stored 2 byte checksum files. We already have 2^32 or 2^48 in the checksum checking
	if (j && salt->chk_bytes == 1 && salt->cnt == 3)
		j = 0;  // we do not need to use magic, on 3 stored 2 byte checksum files. We already have 2^32 or 2^48 in the checksum checking
	if (!j) {
		for (i = 0; i < salt->cnt; ++i)
			salt->H[i].magic = 0;	// remove any 'magic' logic from this hash.
	}

	/* pack the blobs back to back (with 1 spare byte between consecutive
	 * blobs; set_salt() recomputes the matching pointers) */
	psalt = mem_calloc(1, sizeof(PKZ_SALT) + ex_len[0]+ex_len[1]+ex_len[2]+2);
	memcpy(psalt, salt, sizeof(*salt));
	memcpy(psalt->zip_data, H[0], ex_len[0]);
	MEM_FREE(H[0]);
	if (salt->cnt > 1)
		memcpy(psalt->zip_data+ex_len[0]+1, H[1], ex_len[1]);
	MEM_FREE(H[1]);
	if (salt->cnt > 2)
		memcpy(psalt->zip_data+ex_len[0]+ex_len[1]+2, H[2], ex_len[2]);
	MEM_FREE(H[2]);
	MEM_FREE(salt);

	psalt->dsalt.salt_alloc_needs_free = 1;  // we used mem_calloc, so JtR CAN free our pointer when done with them.

	// set the JtR core linkage stuff for this dyna_salt
	memcpy(salt_p, &psalt, sizeof(psalt));
	psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(PKZ_SALT, cnt);
	psalt->dsalt.salt_cmp_size =
		SALT_CMP_SIZE(PKZ_SALT, cnt, zip_data, ex_len[0]+ex_len[1]+ex_len[2]+2);

	return salt_p;

error:
	/* FIX: release everything allocated so far (MEM_FREE is NULL-safe on
	 * the not-yet-loaded H[] slots, which start out as 0). */
	MEM_FREE(cpalloc);
	MEM_FREE(H[0]);
	MEM_FREE(H[1]);
	MEM_FREE(H[2]);
	MEM_FREE(salt);
	return 0;
}
static void set_key(char *key, int index)
{
	/* Remember the candidate verbatim so get_key() can hand it back. */
	char *dst = saved_key[index];

	strnzcpy(dst, key, PLAINTEXT_LENGTH + 1);
	dirty = 1; /* force crypt_all() to re-derive the pkzip key state */
}
static char *get_key(int index)
{
	/* Return the password exactly as stored by set_key(). */
	char *pw = saved_key[index];
	return pw;
}
static int cmp_one(void *binary, int idx)
{
	/* chk[idx] is 1 iff crypt_all() passed every checksum for this candidate. */
	return (chk[idx] != 0);
}
static int cmp_all(void *binary, int count)
{
	int n, total = 0;

	/* Sum the per-candidate hit flags; a non-zero total means at least
	 * one candidate passed (branch-free, cheaper than per-entry ifs). */
	for (n = 0; n < count; ++n)
		total += chk[n];
	return total;
}
/* this function is used by cmp_exact_loadfile. It will load the next
* part of the file then decrypt the data, and return just how many
* bytes were loaded.
*
* This function is 'similar' to an fread(). However, it also decrypts data
*/
/*
 * Read the next chunk (up to sizeof_n bytes) of the encrypted zip blob from
 * fp and decrypt it in place with the running pkzip key state.  pkey0/1/2
 * are updated as a side effect so consecutive calls continue the stream.
 * *inp_used tracks total bytes consumed out of salt->compLen.
 * Returns the number of decrypted bytes produced; 0 when the blob has been
 * fully consumed or on a short read.
 */
static int get_next_decrypted_block(u8 *in, int sizeof_n, FILE *fp, u32 *inp_used, MY_WORD *pkey0, MY_WORD *pkey1, MY_WORD *pkey2)
{
u32 new_bytes = sizeof_n, k;
u8 C;
/* we have read all the bytes, we're done */
if (*inp_used >= salt->compLen)
return 0;
if (*inp_used + new_bytes > salt->compLen)
/* this is the last block. Only load the bytes that are left */
new_bytes = salt->compLen - *inp_used;
/* return the correct 'offset', so we can track when the file buffer has been fully read */
*inp_used += new_bytes;
/* read the data */
if (fread(in, 1, new_bytes, fp) != new_bytes)
return 0;
/* decrypt the data bytes (in place, in same buffer). Easy to do, only requires 1 temp character variable. */
/* NOTE: the key0/key1/key2 update order below is the pkzip stream-cipher
 * schedule; it must stay exactly as written. */
for (k = 0; k < new_bytes; ++k) {
C = PKZ_MULT(in[k],(*pkey2));
pkey0->u = jtr_crc32 (pkey0->u, C);
pkey1->u = (pkey1->u + pkey0->c[KB1]) * 134775813 + 1;
pkey2->u = jtr_crc32 (pkey2->u, pkey1->c[KB2]);
in[k] = C;
}
/* return the number of bytes we read from the file on this read */
return new_bytes;
}
/* Ok, this is the more complex example. Here we have to load the file (which may be HUGE)
* decrypt the bytes from this file, and then inflate that data, and crc the bytes which we
* have inflated from that stream. Then in the end, when we use all input bytes, if we have
* inflated the right amount of data, ended up with a Z_STREAM_END, and the proper sized
* decompression buffer, and the CRC matches, then we know we have the correct password
*
* This function is called from cmp_exact(), when cmp_exact finds out we have to decrypt from
* the stored .zip file.
*
* this code is modifications made to the zpipe.c 'example' code from the zlib web site.
*/
#define CHUNK (64*1024)
/*
 * Full validation against an on-disk zip: stream the encrypted blob from
 * salt->fname (starting at salt->offset), decrypt it with the candidate's
 * key state (seeded from K12[]), inflate it (unless stored), and compare
 * length and CRC against the values recorded in the salt.
 * Returns non-zero when the password validates.  On file errors we return 1
 * (treat as a match) because the checksum test already passed and we cannot
 * disprove it without the file.
 */
static int cmp_exact_loadfile(int index)
{
int ret;
u32 have, k;
z_stream strm;
unsigned char in[CHUNK];
unsigned char out[CHUNK];
FILE *fp;
MY_WORD key0, key1, key2;
u8 *b, C;
u32 inp_used, decomp_len=0;
u32 crc = 0xFFFFFFFF;
/* Open the zip file, and 'seek' to the proper offset of the binary zip blob */
fp = fopen(salt->fname, "rb");
if (!fp) {
fprintf(stderr, "\nERROR, the zip file: %s has been removed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
return 1;
}
if (fseek(fp, salt->offset, SEEK_SET)) {
fprintf(stderr, "\nERROR, the zip file: %s fseek() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
fclose(fp);
return 1;
}
/* 'seed' the decryption with the IV. We do NOT use these bytes, they simply seed us. */
key0.u = K12[index*3], key1.u = K12[index*3+1], key2.u = K12[index*3+2];
k=12;
if (fread(in, 1, 12, fp) != 12) {
fprintf(stderr, "\nERROR, the zip file: %s fread() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
fclose(fp);
return 1;
}
/* run the 12 stored IV bytes through the key schedule (output discarded) */
b = salt->H[salt->full_zip_idx].h;
do {
C = PKZ_MULT(*b++,key2);
key0.u = jtr_crc32 (key0.u, C);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
}
while(--k);
/* this is 'sort of' our file pointer. It is the 'index' into the file's encrypted, compressed data buffer. */
/* we have read the 12 bytes of IV data, and updated our keys. Now we start processing the rest of the bytes */
/* to get the data to inflate, and crc check */
inp_used = 12;
if (salt->H[salt->full_zip_idx].compType == 0) {
// handle a stored blob (no inflate step needed; only decrypt and CRC)
int avail_in;
crc = 0xFFFFFFFF;
avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
while (avail_in) {
for (k = 0; k < avail_in; ++k)
crc = jtr_crc32(crc,in[k]);
avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
}
fclose(fp);
return ~crc == salt->crc32;
}
/* allocate inflate state */
strm.zalloc = Z_NULL;
strm.zfree = Z_NULL;
strm.opaque = Z_NULL;
strm.avail_in = 0;
strm.next_in = Z_NULL;
/* -15: raw deflate stream, no zlib/gzip header (zip stores raw deflate) */
ret = inflateInit2(&strm, -15);
if (ret != Z_OK) /* if zlib is hosed, then likely there is no reason at all to continue. Better to exit, and let the user 'fix' the system */
perror("Error, initializing the libz inflateInit2() system\n");
/* decompress until deflate stream ends or end of file */
do {
strm.avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
if (ferror(fp)) {
inflateEnd(&strm);
fclose(fp);
fprintf(stderr, "\nERROR, the zip file: %s fread() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
return 1;
}
if (strm.avail_in == 0)
break;
strm.next_in = in;
/* run inflate() on input until output buffer not full */
do {
strm.avail_out = CHUNK;
strm.next_out = out;
ret = inflate(&strm, Z_NO_FLUSH);
switch (ret) {
case Z_NEED_DICT:
case Z_DATA_ERROR:
case Z_MEM_ERROR:
inflateEnd(&strm);
fclose(fp);
return 0;
}
have = CHUNK - strm.avail_out;
/* now update our crc value */
for (k = 0; k < have; ++k)
crc = jtr_crc32(crc,out[k]);
decomp_len += have;
} while (strm.avail_out == 0);
/* done when inflate() says it's done */
} while (ret != Z_STREAM_END);
/* clean up and return: match requires a clean stream end, all input
 * consumed, the expected decompressed length, and a matching CRC32 */
inflateEnd(&strm);
fclose(fp);
return ret == Z_STREAM_END && inp_used == salt->compLen && decomp_len == salt->deCompLen && salt->crc32 == ~crc;
}
/*
 * Final, definitive password check.  If the salt carries the full zip blob
 * in memory, decrypt it (after burning the 12-byte IV), inflate if needed,
 * and compare length + CRC32.  If the blob lives on disk, defer to
 * cmp_exact_loadfile().  Checksum-only salts are accepted as-is (return 1)
 * because no stronger evidence exists.
 */
static int cmp_exact(char *source, int index)
{
const u8 *b;
u8 C, *decompBuf, *decrBuf, *B;
u32 k, crc;
MY_WORD key0, key1, key2;
z_stream strm;
int ret;
if (salt->H[salt->full_zip_idx].full_zip == 0)
/* we do not have a zip file, this is 'checksum' only
* POSSIBLY, we should log and output to screen that
* we are not 100% 'sure' we have the right password!! */
return 1;
#ifdef ZIP_DEBUG
fprintf(stderr, "FULL zip test being done. (pass=%s)\n", saved_key[index]);
#endif
if (salt->fname[0] == 0) {
/* we have the whole zip blob in memory, simply allocate a decrypt buffer, decrypt
* in one step, crc and be done with it. This is the 'trivial' type. */
decrBuf = mem_alloc(salt->compLen-12);
key0.u = K12[index*3], key1.u = K12[index*3+1], key2.u = K12[index*3+2];
b = salt->H[salt->full_zip_idx].h;
/* first 12 bytes are the encryption IV: run them through the key
 * schedule but discard the plaintext */
k=12;
do {
C = PKZ_MULT(*b++,key2);
key0.u = jtr_crc32 (key0.u, C);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
}
while(--k);
/* decrypt the remaining payload into decrBuf */
B = decrBuf;
k = salt->compLen-12;
do {
C = PKZ_MULT(*b++,key2);
key0.u = jtr_crc32 (key0.u, C);
*B++ = C;
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
} while (--k);
if (salt->H[salt->full_zip_idx].compType == 0) {
// handle a stored blob (no inflate step; CRC the decrypted bytes directly)
crc = 0xFFFFFFFF;
for (k = 0; k < salt->compLen-12; ++k)
crc = jtr_crc32(crc,decrBuf[k]);
MEM_FREE(decrBuf);
return ~crc == salt->crc32;
}
strm.zalloc = Z_NULL;
strm.zfree = Z_NULL;
strm.opaque = Z_NULL;
strm.next_in = Z_NULL;
strm.avail_in = 0;
ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */
if (ret != Z_OK)
perror("Error, initializing the libz inflateInit2() system\n");
decompBuf = mem_alloc(salt->deCompLen);
strm.next_in = decrBuf;
strm.avail_in = salt->compLen-12;
strm.avail_out = salt->deCompLen;
strm.next_out = decompBuf;
ret = inflate(&strm, Z_SYNC_FLUSH);
inflateEnd(&strm);
if (ret != Z_STREAM_END || strm.total_out != salt->deCompLen) {
MEM_FREE(decompBuf);
MEM_FREE(decrBuf);
return 0;
}
crc = 0xFFFFFFFF;
for (k = 0; k < strm.total_out; ++k)
crc = jtr_crc32(crc,decompBuf[k]);
MEM_FREE(decompBuf);
MEM_FREE(decrBuf);
return ~crc == salt->crc32;
}
/* we have a stand alone function to handle this more complex method of
* loading from file, decrypting, decompressing, and crc'ing the data
* It is complex enough of a task, to have its own function. */
return cmp_exact_loadfile(index);
}
#if USE_PKZIP_MAGIC
/*
 * Continuation-byte count for a UTF-8 lead byte, indexed by (lead & 0x3f)
 * for leads >= 0xC0: 1 for 0xC0-0xDF, 2 for 0xE0-0xEF, 3 for 0xF0-0xF7.
 * The 4/5 entries map the obsolete 5/6-byte forms, which
 * isLegalUTF8_char() rejects later (switch default / lead > 0xF4 check).
 */
const char exBytesUTF8[64] = {
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
};
/*
 * Validate a single (possibly multi-byte) UTF-8 character at *source.
 * 'length' is the number of bytes available in the buffer.
 * Returns 1 for a plain byte (< 0xC0), the total sequence length
 * (continuation bytes + 1) for a valid multi-byte character, or -1 for
 * an invalid/truncated sequence.
 */
static int isLegalUTF8_char(const u8 *source, int length)
{
u8 a;
int len;
const u8 *srcptr;
if (*source < 0xC0)
return 1;
len = exBytesUTF8[*source&0x3f];
srcptr = source+len;
if (len+1 > length)
return -1;
/* validate from the last continuation byte backwards; the outer switch
 * deliberately falls through (case 4 -> 3 -> 2 -> 1) checking one byte
 * per level */
switch (len) {
default: return -1;
/* Everything else falls through when "true"... */
case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1;
case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1;
case 2: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1;
/* reject overlong / surrogate / out-of-range encodings by lead byte */
switch (*source) {
/* no fall-through in this inner switch */
case 0xE0: if (a < 0xA0) return -1;
break;
case 0xED: if (a > 0x9F) return -1;
break;
case 0xF0: if (a < 0x90) return -1;
break;
case 0xF4: if (a > 0x8F) return -1;
}
case 1: if (*source >= 0x80 && *source < 0xC2) return -1;
}
if (*source > 0xF4) return -1;
return len+1;
}
/*
 * Heuristic "is this text?" check used by the magic logic: accept plain
 * ASCII, valid UTF-8 sequences, and BOM-prefixed UTF-16/UTF-32 content
 * (in unicode mode only the low byte of each code unit is checked).
 * Returns 1 when the buffer plausibly is text, 0 when it looks binary.
 */
static int validate_ascii(const u8 *out, int inplen)
{
int i;
int unicode=0;	/* 0 = 8-bit, 1 = utf16 (skip 1 null), 3 = utf32 (skip 3) */
for (i = 0; i < inplen-1; ++i) {
if (out[i] > 0x7E) {
// first check to 'see' if this is a valid utf8 character. If so, let it 'pass'.
if (unicode)
return 0; // in unicode mode, we ONLY handle 'ascii' bytes in the low byte.
if (out[i] > 0xC0) {
int len;
if (i > inplen-4)
return 1; // too close to the end to validate; accept.
len = isLegalUTF8_char(&out[i], 5);
if (len < 0) return 0;
i += (len-1);
}
else {
if (i) {
// check for utf8 BOM \xEF \xBB \xBF
if (out[0] == 0xEF && out[1] == 0xBB && out[2] == 0xBF) {
i = 2;
continue;
}
/* check for Unicode BOM (FF FE for utf16le, FE FF for utf16be, FF FE 00 00 for utf32le, not sure if 00 00 FE FF is utf32be, but likely is) */
if (out[0] == 0xFF && out[1] == 0xFE) {
unicode = 1;
i++;
continue;
}
/* unicode BE bom */
if (out[0] == 0xFE && out[1] == 0xFF) {
unicode = 1;
i += 2;
continue;
}
/* utf32 LE */
if (out[0] == 0xFF && out[1] == 0xFE && out[2] == 0 && out[3] == 0) {
unicode = 3;
i += 3;
continue;
}
/* utf32 BE bom */
if (out[0] == 0 && out[1] == 0 && out[2] == 0xFE && out[3] == 0xFF) {
unicode = 3;
i += 6;
continue;
}
// allow a 'single' byte > 0x7E as long as bytes following are ascii.
if (out[1] <= 0x7E && out[1] >= 0x20) {
++i;
continue;
}
return 0;
}
}
} else if (out[i] < 0x20) {
/* we do not need to deal with DOS EOF char 0x1a, since we will never have the 'end' of the file */
/* we do allow the ESC character for ANSI files, however, they are frequently also binary, so will fail in other places */
if (out[i]!='\n' && out[i]!='\r' && out[i]!='\t' && out[i]!=0x1B)
return 0;
}
i += unicode; // skip the null bytes
}
return 1;
}
/* Return 1 iff the buffer p begins with any of the known magic signatures
 * in pSig; 0 otherwise.  (len is unused; callers guarantee p holds at
 * least the longest signature.) */
static int CheckSigs(const u8 *p, int len, ZIP_SIGS *pSig)
{
	int s, k, hit;

	for (s = 0; s < pSig->magic_count; ++s) {
		const u8 *sig = pSig->magic_signature[s];

		hit = 1;
		for (k = 0; hit && k < pSig->magic_sig_len[s]; ++k)
			hit = (p[k] == sig[k]);
		if (hit)
			return 1;
	}
	return 0;
}
#endif
/* note, Buf is the 'full' decrypted zip buffer (len bytes long). It DOES contain the first 3 bits, which have already
* been decoded, and have told us we had a code 2 (var table block)
* all done without BITS(), PULLBYTE(), BITSNEEDED() macros. We 'know' the data we need, and we know that we have
* 'enough', so we do not worry about all of the overhead, and validation logic.
*
* In testing, this function catches ALL bad decryptions, except about 1/300 to 1/350. So, it is not too bad.
*/
/*
 * Cheap plausibility check of a deflate dynamic-Huffman (BTYPE=2) block
 * header, working directly on the decrypted bytes in 'next' (the 3
 * already-consumed BFINAL/BTYPE bits included).  It reads HLIT/HDIST/HCLEN,
 * tallies the code-length code lengths, and rejects over-subscribed or
 * incomplete sets.  Returns 1 if the header is plausible, 0 if this
 * cannot be a valid deflate stream (i.e. wrong password).
 */
MAYBE_INLINE static int check_inflate_CODE2(u8 *next)
{
u32 bits, hold, thisget, have, i;
int left;
u32 ncode;
u32 ncount[2]; // ends up being an array of 8 u8 count values. But we can clear it, and later 'check' it with 2 u32 instructions.
u8 *count; // this will point to ncount array. NOTE, this is alignment required 'safe' for Sparc systems or others requiring alignment.
#if (ARCH_LITTLE_ENDIAN==1) && (ARCH_ALLOWS_UNALIGNED==1)
// 'speedup' for x86 type systems. pkzip/inflate was designed here, so why not use it.
hold = *((u32*)next);
#else
hold = *next + (((u32)next[1])<<8) + (((u32)next[2])<<16) + (((u32)next[3])<<24);
#endif
next += 3; // we pre-increment when pulling it in the loop, thus we need to be 1 byte back.
hold >>= 3; // we already processed 3 bits
count = (u8*)ncount;
if (257+(hold&0x1F) > 286)
return 0; // nlen, but we do not use it.
hold >>= 5;
if (1+(hold&0x1F) > 30)
return 0; // ndist, but we do not use it.
hold >>= 5;
ncode = 4+(hold&0xF);
hold >>= 4;
// we have 15 bits left.
hold += ((u32)(*++next)) << 15;
hold += ((u32)(*++next)) << 23;
// we now have 31 bits. We need to know this for the loop below.
bits = 31;
// We have 31 bits now, in accum. If we are processing 19 codes, we do 7, then have 10 bits.
// Add 16 more and have 26, then use 21, have 5. Then load 16 more, then eat 15 of them.
have = 0;
ncount[0] = ncount[1] = 0;
/* consume ncode 3-bit code-length codes, tallying each value in count[] */
for (;;) {
if (have+7>ncode)
thisget = ncode-have;
else
thisget = 7;
have += thisget;
bits -= thisget*3;
while (thisget--) {
++count[hold&7];
hold>>=3;
}
if (have == ncode)
break;
hold += ((u32)(*++next)) << bits;
bits += 8;
hold += ((u32)(*++next)) << bits;
bits += 8;
}
count[0] = 0;
if (!ncount[0] && !ncount[1])
return 0; /* if no codes at all, then simply bail, that is invalid */
/* check for an over-subscribed or incomplete set of lengths */
/* this will catch about 319 out of 320 'bad' passwords that */
/* have made it into this function. Note, only 1/4 of the */
/* passwords which pass the checksum, can make it here. Of */
/* those, we drop 319/320 or about that many (a good check!) */
left = 1;
for (i = 1; i <= 7; ++i) {
left <<= 1;
left -= count[i];
if (left < 0)
return 0; /* over-subscribed */
}
if (left > 0)
return 0; /* incomplete set */
return 1; /* Passed this check! */
}
//static code const * const lcode = lenfix;
//static code const * const dcode = distfix;
/* This function handles inflate CODE type 1. This is a 'fixed' table code. We set the fixed table, */
/* and then inflate some data (without writing anything. If we find any BAD lookback data, we can */
/* return a failure. We have 24 bytes of inflate data, and this almost always is more than enough */
/* to turn up an error. If we find we need more, we will do more than 24 */
/*
 * Plausibility check for a deflate fixed-Huffman (BTYPE=1) block: simulate
 * inflate over 'left' bytes at 'next' using the fixed lenfix/distfix
 * tables, tracking only the output length (whave), never producing output.
 * Any invalid literal/length code, invalid distance code, or back-reference
 * reaching before the start of output proves a bad decryption.
 * Returns 1 when no error was found before the input ran out, 0 otherwise.
 */
MAYBE_INLINE static int check_inflate_CODE1(u8 *next, int left)
{
u32 whave = 0, op, bits, hold,len;
code here;
#if (ARCH_LITTLE_ENDIAN==1) && (ARCH_ALLOWS_UNALIGNED==1)
// 'speedup' for x86 type systems. pkzip/inflate was designed here, so why not use it.
hold = *((u32*)next);
#else
hold = *next + (((u32)next[1])<<8) + (((u32)next[2])<<16) + (((u32)next[3])<<24);
#endif
next += 3; // we pre-increment when pulling it in the loop, thus we need to be 1 byte back.
left -= 4;
hold >>= 3; // we already processed 3 bits
bits = 32-3;
for (;;) {
if (bits < 15) {
if (left < 2)
return 1; // we are out of bytes. Return we had no error.
left -= 2;
hold += (u32)(*++next) << bits;
bits += 8;
hold += (u32)(*++next) << bits;
bits += 8;
}
/* fixed literal/length table lookup (9-bit index) */
here=lenfix[hold & 0x1FF];
op = (unsigned)(here.bits);
hold >>= op;
bits -= op;
op = (unsigned)(here.op);
if (op == 0) /* literal */
++whave;
else if (op & 16) { /* length base */
len = (unsigned)(here.val);
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
if (!left)
return 1; /*we are out of bytes. Return we had no error.*/
--left;
hold += (u32)(*++next) << bits;
bits += 8;
}
len += (unsigned)hold & ((1U << op) - 1);
hold >>= op;
bits -= op;
}
if (bits < 15) {
if (left < 2)
return 1; /*we are out of bytes. Return we had no error.*/
left -= 2;
hold += (u32)(*++next) << bits;
bits += 8;
hold += (u32)(*++next) << bits;
bits += 8;
}
/* fixed distance table lookup (5-bit index) */
here = distfix[hold & 0x1F];
// dodist:
op = (unsigned)(here.bits);
hold >>= op;
bits -= op;
op = (unsigned)(here.op);
if (op & 16) { /* distance base */
u32 dist = (unsigned)(here.val);
op &= 15; /* number of extra bits */
if (bits < op) {
if (!left)
return 1; /*we are out of bytes. Return we had no error.*/
--left;
hold += (u32)(*++next) << bits;
bits += 8;
if (bits < op) {
if (!left)
return 1; /*we are out of bytes. Return we had no error.*/
--left;
hold += (u32)(*++next) << bits;
bits += 8;
}
}
dist += (unsigned)hold & ((1U << op) - 1);
if (dist > whave)
return 0; /*invalid distance too far back*/
hold >>= op;
bits -= op;
//***** start of patched code from Pavel Semjanov (see original code below)
whave += len;
}
else
return 0; /*invalid distance code*/
}
else if (op & 32) {
// end of block [may present in short sequences, but only at the end.] NOTE, we need to find out if we EVER hit the end of a block, at only 24 bytes???
if (left == 0)
return 1;
return 0;
}
else {
return 0; // invalid literal/length code.
}
//***** End of patched code from Pavel
}
}
// original code block (for above), prior to patch from Pavel Semjanov [pavel@semjanov.com]
// this code would be a direct drop in between the comments starting and stopping with //***** above
// also the dodist label was commented out (no longer used).
#if 0
whave += dist;
}
else if ((op & 64) == 0) { /* 2nd level distance code */
here = distfix[here.val + (hold & ((1U << op) - 1))];
goto dodist;
}
else
return 0; /*invalid distance code*/
}
else if (op & 64) {
// 2nd level length code.
//here = lcode[here.val + (hold & ((1U << op) - 1))];
//goto dolen;
// this causes an infinite loop. Also, I VERY seriously doubt, this will EVER happen in the first
// 24 bytes of code. NOTE, there may be problems, in the fact this causes a inf loop!, but for now,
// simply return 0, then debug later.
return 0;
}
else if (op & 32) {
// end of block NOTE, we need to find out if we EVER hit the end of a block, at only 24 bytes???
// It is VERY likely we do SHOULD NOT EVER hit this. If that is the case, return that this block is bogus.
// check next OP (if we have enough bits left), if CODE=3, fail. If code==0, check
return 0;
}
else {
return 0; // invalid literal/length code.
}
#endif
/*
* Crypt_all simply performs the checksum .zip validation of the data. It performs
* this for ALL hashes provided. If any of them fail to match, then crypt all puts the
* complement of the 'proper' checksum of the first hash into the output. These 2 bytes
* are checked against the binary for this salt/password combination. Thus, if any
* checksum fails, it will never match binary. However, if ALL of the checksums match
* we then put the checksum bytes from the first hash, into our output data. Then, when
* the binary check (cmp_all, cmp_one) is performed, it WILL match. NOTE, this does
* not mean we have found the password. Just that all hashes quick check checksums
* for this password 'work'.
*/
/*
 * Fast rejection pass: for every candidate password, derive the pkzip key
 * state, decrypt the 12-byte IV of each file entry, and apply a cascade of
 * cheap checks (1-2 byte checksum, deflate header plausibility, optional
 * magic/signature tests, and a partial inflate for full-zip salts).
 * chk[idx] is set to 1 only if ALL entries pass; cmp_* then read chk[].
 */
static int crypt_all(int *pcount, struct db_salt *_salt)
{
const int _count = *pcount;
int idx;
#if (ZIP_DEBUG==2)
static int CNT, FAILED, FAILED2;
++CNT;
#endif
// pkzip kinda sucks a little for multi-threading, since there is different amount of work to be
// done, depending upon the password. Thus, we pack in OMP_MOD passwords into each thread, and
// hopefully some of the differences will even themselves out in the end. If we have 2 threads
// then thread 1 gets 0 to 127 password, and thread 2 gets 128-256. Once they 'get' their data,
// there should be no mutexing of the runtime data, thus the threads should run fast.
// Also, since we have 'multiple' files in a .zip file (and multiple checksums), we bail as at the
// first time we fail to match checksum. So, there may be some threads which check more checksums.
// Again, hopefully globbing many tests into a threads working set will flatten out these differences.
#ifdef _OPENMP
#pragma omp parallel for private(idx)
#endif
for (idx = 0; idx < _count; ++idx) {
int cur_hash_count = salt->cnt;
int cur_hash_idx = -1;
MY_WORD key0, key1, key2;
u8 C;
const u8 *b;
u8 curDecryBuf[256];
#if USE_PKZIP_MAGIC
u8 curInfBuf[128];
#endif
int k, SigChecked;
u16 e, e2, v1, v2;
z_stream strm;
int ret;
/* use the pwkey for each hash. We mangle on the 12 bytes of IV to what was computed in the pwkey load. */
if (dirty) {
u8 *p = (u8*)saved_key[idx];
/* load the 'pwkey' one time, put it into the K12 array */
key0.u = 0x12345678UL; key1.u = 0x23456789UL; key2.u = 0x34567890UL;
do {
key0.u = jtr_crc32 (key0.u, *p++);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
} while (*p);
K12[idx*3] = key0.u, K12[idx*3+1] = key1.u, K12[idx*3+2] = key2.u;
goto SkipKeyLoadInit;
}
do
{
// 2nd, and later times through the loop, AND if keys are not dirty (i.e. multiple salts
// for the same key load), we do NOT perform the key compute, but instead load the pre-computed
// key data from the array.
key0.u = K12[idx*3], key1.u = K12[idx*3+1], key2.u = K12[idx*3+2];
SkipKeyLoadInit:;
b = salt->H[++cur_hash_idx].h;
k=11;
e = salt->H[cur_hash_idx].c;
e2 = salt->H[cur_hash_idx].c2;
/* decrypt the first 11 IV bytes (only key-state updates matter here) */
do
{
C = PKZ_MULT(*b++,key2);
key0.u = jtr_crc32 (key0.u, C);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
}
while(--k);
/* if the hash is a 2 byte checksum type, then check that value first */
/* There is no reason to continue if this byte does not check out. */
if (salt->chk_bytes == 2 && C != (e&0xFF) && C != (e2&0xFF))
goto Failed_Bailout;
/* 12th IV byte: must match the high checksum byte (CRC or timestamp) */
C = PKZ_MULT(*b++,key2);
#if 1
// https://github.com/magnumripper/JohnTheRipper/issues/467
// Fixed, JimF. Added checksum test for crc32 and timestamp.
if (C != (e>>8) && C != (e2>>8))
goto Failed_Bailout;
#endif
// Now, update the key data (with that last byte).
key0.u = jtr_crc32 (key0.u, C);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
// Ok, we now have validated this checksum. We need to 'do some' extra pkzip validation work.
// What we do here, is to decrypt a little data (possibly only 1 byte), and perform a single
// 'inflate' check (if type is 8). If type is 0 (stored), and we have a signature check, then
// we do that here. Also, if the inflate code is a 0 (stored block), and we do sig check, then
// we can do that WITHOUT having to call inflate. however, if there IS a sig check, we will have
// to call inflate on 'some' data, to get a few bytes (or error code). Also, if this is a type
// 2 or 3, then we do the FULL inflate, CRC check here.
e = 0;
// First, we want to get the inflate CODE byte (the first one).
C = PKZ_MULT(*b++,key2);
SigChecked = 0;
if ( salt->H[cur_hash_idx].compType == 0) {
// handle a stored file.
// We can ONLY deal with these IF we are handling 'magic' testing.
#if USE_PKZIP_MAGIC
// Ok, if we have a signature, check it here, WITHOUT having to call zLib's inflate.
if (salt->H[cur_hash_idx].pSig->max_len) {
int len = salt->H[cur_hash_idx].pSig->max_len;
if (len > salt->H[cur_hash_idx].datlen-12)
len = salt->H[cur_hash_idx].datlen-12;
SigChecked = 1;
curDecryBuf[0] = C;
for (; e < len;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
if (salt->H[cur_hash_idx].magic == 255) {
if (!validate_ascii(&curDecryBuf[5], len-5))
goto Failed_Bailout;
} else {
if (!CheckSigs(curDecryBuf, len, salt->H[cur_hash_idx].pSig))
goto Failed_Bailout;
}
}
#endif
continue;
}
#if 1
// https://github.com/magnumripper/JohnTheRipper/issues/467
// Ok, if this is a code 3, we are done.
// Code moved to after the check for stored type. (FIXED) This check was INVALID for a stored type file.
if ( (C & 6) == 6)
goto Failed_Bailout;
#endif
if ( (C & 6) == 0) {
// Check that checksum2 is 0 or 1. If not, I 'think' we can be done
if (C > 1)
goto Failed_Bailout;
// now get 4 bytes. This is the length. It is made up of 2 16 bit values.
// these 2 values are checksumed, so it is easy to tell if the data is WRONG.
// correct data is u16_1 == (u16_2^0xFFFF)
curDecryBuf[0] = C;
for (e = 0; e <= 4; ) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
v1 = curDecryBuf[1] | (((u16)curDecryBuf[2])<<8);
v2 = curDecryBuf[3] | (((u16)curDecryBuf[4])<<8);
if (v1 != (v2^0xFFFF))
goto Failed_Bailout;
#if USE_PKZIP_MAGIC
// Ok, if we have a signature, check it here, WITHOUT having to call zLib's inflate.
if (salt->H[cur_hash_idx].pSig->max_len) {
int len = salt->H[cur_hash_idx].pSig->max_len + 5;
if (len > salt->H[cur_hash_idx].datlen-12)
len = salt->H[cur_hash_idx].datlen-12;
SigChecked = 1;
for (; e < len;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
if (salt->H[cur_hash_idx].magic == 255) {
if (!validate_ascii(&curDecryBuf[5], len-5))
goto Failed_Bailout;
} else {
if (!CheckSigs(&curDecryBuf[5], len-5, salt->H[cur_hash_idx].pSig))
goto Failed_Bailout;
}
}
#endif
}
else {
// Ok, now we have handled inflate code type 3 and inflate code 0 (50% of 'random' data)
// We now have the 2 'hard' ones left (fixed table, and variable table)
curDecryBuf[0] = C;
if ((C&6) == 4) { // inflate 'code' 2 (variable table)
#if (ZIP_DEBUG==2)
static unsigned count, found;
++count;
#endif
// we need 4 bytes, + 2, + 4 at most.
for (; e < 10;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
if (!check_inflate_CODE2(curDecryBuf))
goto Failed_Bailout;
#if (ZIP_DEBUG==2)
fprintf(stderr, "CODE2 Pass=%s count = %u, found = %u\n", saved_key[idx], count, ++found);
#endif
}
else {
int til;
#if (ZIP_DEBUG==2)
static unsigned count, found;
++count;
#endif
til = 36;
if (salt->H[cur_hash_idx].datlen-12 < til)
til = salt->H[cur_hash_idx].datlen-12;
for (; e < til;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
if (!check_inflate_CODE1(curDecryBuf, til))
goto Failed_Bailout;
#if (ZIP_DEBUG==2)
fprintf(stderr, "CODE1 Pass=%s count = %u, found = %u\n", saved_key[idx], count, ++found);
#endif
}
}
#if USE_PKZIP_MAGIC
// Ok, now see if we need to check sigs, or do a FULL inflate/crc check.
if (!SigChecked && salt->H[cur_hash_idx].pSig->max_len) {
int til = 180;
if (salt->H[cur_hash_idx].datlen-12 < til)
til = salt->H[cur_hash_idx].datlen-12;
for (; e < til;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
strm.zalloc = Z_NULL;
strm.zfree = Z_NULL;
strm.opaque = Z_NULL;
strm.next_in = Z_NULL;
strm.avail_in = til;
ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */
if (ret != Z_OK)
perror("Error, initializing the libz inflateInit2() system\n");
strm.next_in = curDecryBuf;
strm.avail_out = sizeof(curInfBuf);
strm.next_out = curInfBuf;
ret = inflate(&strm, Z_SYNC_FLUSH);
inflateEnd(&strm);
if (ret != Z_OK) {
// we need to handle zips smaller than sizeof curInfBuf. If we find a zip of this
// size, the return is Z_STREAM_END, BUT things are fine.
if (ret == Z_STREAM_END && salt->deCompLen == strm.total_out)
; // things are ok.
else
goto Failed_Bailout;
}
if (!strm.total_out)
goto Failed_Bailout;
ret = salt->H[cur_hash_idx].pSig->max_len;
if (salt->H[cur_hash_idx].magic == 255) {
if (!validate_ascii(curInfBuf, strm.total_out))
goto Failed_Bailout;
} else {
if (strm.total_out < ret)
goto Failed_Bailout;
if (!CheckSigs(curInfBuf, strm.total_out, salt->H[cur_hash_idx].pSig))
goto Failed_Bailout;
}
}
#endif
/* full-zip salts: partially inflate up to 200 decrypted bytes as a
 * final cheap screen before cmp_exact does the complete job */
if (salt->H[cur_hash_idx].full_zip) {
u8 inflateBufTmp[1024];
if (salt->compLen > 240 && salt->H[cur_hash_idx].datlen >= 200) {
for (;e < 200;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
strm.zalloc = Z_NULL;
strm.zfree = Z_NULL;
strm.opaque = Z_NULL;
strm.next_in = Z_NULL;
strm.avail_in = e;
ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */
if (ret != Z_OK)
perror("Error, initializing the libz inflateInit2() system\n");
strm.next_in = curDecryBuf;
strm.avail_out = sizeof(inflateBufTmp);
strm.next_out = inflateBufTmp;
ret = inflate(&strm, Z_SYNC_FLUSH);
inflateEnd(&strm);
if (ret != Z_OK) {
#if (ZIP_DEBUG==2)
fprintf(stderr, "fail=%d fail2=%d tot="LLd"\n", ++FAILED, FAILED2, ((long long)CNT)*_count);
#endif
goto Failed_Bailout;
}
}
goto KnownSuccess;
}
}
while(--cur_hash_count);
/* We got a checksum HIT!!!! All hash checksums matched. */
/* We load the proper checksum value for the gethash */
KnownSuccess: ;
chk[idx] = 1;
continue;
Failed_Bailout: ;
/* We load the wrong checksum value for the gethash */
chk[idx] = 0;
}
/* clear the 'dirty' flag. Then on multiple different salt calls, we will not have to */
/* encrypt the passwords again. They will have already been loaded in the K12[] array. */
dirty = 0;
return _count;
}
/*
 * JtR format descriptor: registers the pkzip format's parameters and its
 * method table (salt/key handling, crypt_all, comparators) with core.
 */
struct fmt_main fmt_pkzip = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT,
{ NULL },
{ FORMAT_TAG, FORMAT_TAG2 },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_dyna_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_LIBZ */
|
sample-4.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
/*
 * Reads two r x c matrices and multiplies them, splitting the output
 * columns across OpenMP threads.  NOTE(review): the product a*b is only
 * well-defined here when r == c, since both inputs are read as r x c.
 *
 * Fixes vs. the original:
 *  - the shared loop index k was used inside the parallel region without
 *    being private, a data race producing wrong results; it is now local.
 *  - non-last threads truncated the dot product to their column-slice
 *    width (k < nc); the inner loop now runs over the full shared
 *    dimension c for every thread.
 *  - floor() on an integer division was a no-op and is removed.
 */
int main()
{
    int a[10][10], b[10][10], mul[10][10], r, c, i, j;
    system("cls");
    printf("enter the number of row=");
    scanf("%d", &r);
    printf("enter the number of column=");
    scanf("%d", &c);
    printf("enter the first matrix element=\n");
    for (i = 0; i < r; i++)
    {
        for (j = 0; j < c; j++)
        {
            scanf("%d", &a[i][j]);
        }
    }
    printf("enter the second matrix element=\n");
    for (i = 0; i < r; i++)
    {
        for (j = 0; j < c; j++)
        {
            scanf("%d", &b[i][j]);
        }
    }
    int numCols = c;
    int NUM_THREADS = 12; // use built-in function?
    int numColsPerThread = numCols / NUM_THREADS; /* integer division truncates */
    int lastThreadID = NUM_THREADS - 1;
#pragma omp parallel
    {
        int threadID = omp_get_thread_num();
        /* this thread's half-open slice of output columns; the last
         * thread picks up the remainder */
        int colBeg = threadID * numColsPerThread;
        int colEnd = (threadID == lastThreadID) ? numCols
                                                : colBeg + numColsPerThread;
        for (int row = 0; row < r; row++)
        {
            for (int col = colBeg; col < colEnd; col++)
            {
                int sum = 0;
                /* full dot product over the shared dimension; k is
                 * thread-local, so there is no race */
                for (int k = 0; k < c; k++)
                {
                    sum += a[row][k] * b[k][col];
                }
                mul[row][col] = sum;
            }
        }
    }
    printf("multiply of the matrix=\n");
    for (i = 0; i < r; i++)
    {
        for (j = 0; j < c; j++)
        {
            printf("%d\t", mul[i][j]);
        }
        printf("\n");
    }
    return 0;
}
|
image_handler.h | #include "parameters.h"
// Converts incoming Ouster point clouds into stacked range/ambient/intensity
// images, republishes them for visualization, and caches the organized cloud
// in `cloud_track` for downstream loop detection.
class ImageHandler
{
public:
    ros::NodeHandle nh;
    ros::Publisher pub_image;
    cv::Mat image_range;      // per-pixel range, scaled into 8-bit
    cv::Mat image_noise;      // ambient/noise channel
    cv::Mat image_intensity;  // reflectivity/intensity channel
    pcl::PointCloud<PointType>::Ptr cloud_track;
    ImageHandler()
    {
        cloud_track.reset(new pcl::PointCloud<PointType>());
        cloud_track->resize(IMAGE_HEIGHT * IMAGE_WIDTH);
        pub_image = nh.advertise<sensor_msgs::Image>("loop_detector/image_stack", 1);
    }
    void cloud_handler(const sensor_msgs::PointCloud2ConstPtr &cloud_msg)
    {
        // convert cloud
        pcl::PointCloud<PointOuster>::Ptr laser_cloud(new pcl::PointCloud<PointOuster>());
        pcl::fromROSMsg(*cloud_msg, *laser_cloud);
        // PointOuster: x, y, z, intensity, time, noise, ring
        // BUGFIX: '%' and '*' have equal precedence and associate left-to-right,
        // so the original `size % IMAGE_HEIGHT * IMAGE_WIDTH == 0` actually
        // tested `(size % IMAGE_HEIGHT) * IMAGE_WIDTH == 0`.  Parenthesize so
        // the assert verifies the cloud holds a whole number of image frames.
        assert((int)laser_cloud->size() % (IMAGE_HEIGHT * IMAGE_WIDTH) == 0);
        // reset images
        image_range = cv::Mat(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC1, cv::Scalar(0));
        image_noise = cv::Mat(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC1, cv::Scalar(0));
        image_intensity = cv::Mat(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC1, cv::Scalar(0));
        // each (u, v) pixel is independent, so rows can be filled in parallel
        #pragma omp parallel for num_threads(NUM_THREADS)
        for (int u = 0; u < IMAGE_HEIGHT; u++)
        {
            for (int v = 0; v < IMAGE_WIDTH; v++)
            {
                const auto& pt = laser_cloud->points[u * IMAGE_WIDTH + v];
                // extract sensor data
                float range = std::sqrt(pt.x*pt.x + pt.y*pt.y + pt.z*pt.z);
                float noise = pt.noise;
                float intensity = pt.intensity;
                // limit to (0~255)
                noise = std::min(noise, 255.0f);
                intensity = std::min(intensity, 255.0f);
                // update all images
                image_range.at<uint8_t>(u, v) = std::min(range * 20, 255.0f); //TODO(jxl): 20?
                image_noise.at<uint8_t>(u, v) = noise;
                image_intensity.at<uint8_t>(u, v) = intensity;
                // update cloud: save this frame's points into cloud_track
                PointType* p = &cloud_track->points[u * IMAGE_WIDTH + v];
                if (range >= 0.1)
                {
                    p->x = pt.x;
                    p->y = pt.y;
                    p->z = pt.z;
                    p->intensity = intensity;
                }
                else
                {
                    // too close / invalid return: zero the slot
                    p->x = p->y = p->z = p->intensity = 0;
                }
            }
        }
        if (pub_image.getNumSubscribers() != 0)
        {
            // option 1: display intensity image
            // cv::Mat image_visualization = image_intensity.clone();
            // cv::cvtColor(image_visualization, image_visualization, CV_GRAY2RGB);
            // pubImage(&pub_image, image_visualization, cloud_msg->header, "bgr8");
            // option 2: display all images from available lidar channels
            cv::Mat image_visualization;
            cv::vconcat(image_noise, image_intensity, image_visualization);
            cv::vconcat(image_visualization, image_range, image_visualization);
            cv::cvtColor(image_visualization, image_visualization, CV_GRAY2RGB);
            cv::putText(image_visualization, "Ambient", cv::Point2f(5, 20 + IMAGE_HEIGHT*0), CV_FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(255,0,255), 2);
            cv::putText(image_visualization, "Intensity", cv::Point2f(5, 20 + IMAGE_HEIGHT*1), CV_FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(255,0,255), 2);
            cv::putText(image_visualization, "Range", cv::Point2f(5, 20 + IMAGE_HEIGHT*2), CV_FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(255,0,255), 2);
            pubImage(&pub_image, image_visualization, cloud_msg->header, "bgr8");
        }
        // static tf in case tf between base_link and lidar is missing
        static tf::TransformBroadcaster tf_base_to_lidar;
        static tf::Transform base_to_lidar = tf::Transform(tf::createQuaternionFromRPY(0, 0, 0), tf::Vector3(0, 0, 0));
        tf_base_to_lidar.sendTransform(tf::StampedTransform(base_to_lidar, cloud_msg->header.stamp, "base_link", "velodyne"));
    }
    // Wrap a cv::Mat into a cv_bridge image and publish it with the given
    // header and encoding string.
    void pubImage(ros::Publisher *this_pub, const cv::Mat& this_image, std_msgs::Header this_header, string image_format)
    {
        static cv_bridge::CvImage bridge;
        bridge.header = this_header;
        bridge.encoding = image_format;
        bridge.image = this_image;
        this_pub->publish(bridge.toImageMsg());
    }
}; |
main.c | #include <assert.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#ifndef min
#define min(a, b) (((a) < (b)) ? (a) : (b))
#endif
#define min3(x, y, z) min(min(x, y), z)
#ifndef max
#define max(a, b) (((a) > (b)) ? (a) : (b))
#endif
#define max3(x, y, z) max(max(x, y), z)
#define CLAMP2BYTE(v) (((unsigned) (v)) < 255 ? (v) : (v < 0) ? 0 : 255)
/*
 * Count pixels whose 2x2-neighborhood average colour matches a heuristic
 * skin-tone test.  Returns the number of matching pixels.  When OPT_PLANE is
 * defined, the first three interleaved channels are also de-interleaved into
 * `plane` as a side effect; otherwise `plane` is unused.
 */
unsigned int detect(uint8_t *pixel,
                    uint8_t **plane,
                    int width,
                    int height,
                    int channels)
{
    const int stride = width * channels;
    const int edge_col = stride - channels;       /* last valid column offset */
    const int edge_row = height * stride - stride; /* last valid row offset */
    unsigned int hits = 0;
    for (int row = 0; row < height; row++) {
        const int row_off = stride * row;
        /* clamp the row below so the bottom edge samples itself */
        uint8_t *line = pixel + row_off;
        uint8_t *below = pixel + min(row_off + stride, edge_row);
        for (int col = 0; col < width; col++) {
            const int c_off = col * channels;
            /* clamp the column to the right so the edge samples itself */
            const int n_off = min(c_off + channels, edge_col);
            const uint8_t *p00 = line + c_off;
            const uint8_t *p10 = line + n_off;
            const uint8_t *p01 = below + c_off;
            const uint8_t *p11 = below + n_off;
            const int r_avg = (p00[0] + p10[0] + p01[0] + p11[0]) >> 2;
            const int g_avg = (p00[1] + p10[1] + p01[1] + p11[1]) >> 2;
            const int b_avg = (p00[2] + p10[2] + p01[2] + p11[2]) >> 2;
#if OPT_PLANE
            /* clang-format off */
            plane[0][row*width + col] = p00[0];
            plane[1][row*width + col] = p00[1];
            plane[2][row*width + col] = p00[2];
            /* clang-format on */
#endif
            /* TODO: detect appropriate RGB values */
            if (r_avg >= 60 && g_avg >= 40 && b_avg >= 20 &&
                r_avg >= b_avg && (r_avg - g_avg) >= 10 &&
                max3(r_avg, g_avg, b_avg) - min3(r_avg, g_avg, b_avg) >= 10)
                hits++;
        }
    }
    return hits;
}
/*
 * Precompute sampling offsets for a row/column of `len` elements extended by
 * `left` and `right` border cells.  Out-of-range positions are reflected back
 * into [0, len) with the edge element repeated (symmetric padding), then
 * scaled by `step`, the element stride in the image buffer.  `out` must hold
 * left + len + right entries.
 */
void compute_offset(int *out, int len, int left, int right, int step)
{
    assert(out);
    assert((len >= 0) && (left >= 0) && (right >= 0));
    const int period = 2 * len;
    for (int i = -left; i < len + right; i++) {
        int p = i;
        /* wrap into [0, period) */
        while (p < 0)
            p += period;
        while (p >= period)
            p -= period;
        /* second half of the period mirrors the first */
        if (p >= len)
            p = period - 1 - p;
        out[i + left] = p * step;
    }
}
/*
 * Edge-preserving smoothing of an interleaved `channels`-channel image using
 * a (2*radius+1)^2 sliding window.  Per-column running sums and sums of
 * squares (col_val / col_pow) are maintained so each output pixel costs O(1)
 * amortized work.  `smooth_table` maps a pixel value to a smoothing strength
 * that is added to the local variance: larger values smooth more.  Border
 * pixels use the mirrored offsets produced by compute_offset().
 * NOTE(review): supports at most 3 channels (col_sum[3] below) and the
 * malloc results are unchecked — confirm callers guarantee both.
 */
void denoise(uint8_t *out,
             uint8_t *in,
             int *smooth_table,
             int width,
             int height,
             int channels,
             int radius)
{
    assert(in && out);
    assert(radius > 0);
    int window_size = (2 * radius + 1) * (2 * radius + 1);
    int *col_pow = malloc(width * channels * sizeof(int));
    int *col_val = malloc(width * channels * sizeof(int));
    int *row_pos = malloc((width + 2 * radius) * channels * sizeof(int));
    int *col_pos = malloc((height + 2 * radius) * channels * sizeof(int));
    int stride = width * channels;
    compute_offset(row_pos, width, radius, radius, channels);
    compute_offset(col_pos, height, radius, radius, stride);
    /* shift so index -radius .. len-1+radius is valid */
    int *row_off = row_pos + radius;
    int *col_off = col_pos + radius;
    for (int y = 0; y < height; y++) {
        uint8_t *scan_in_line = in + y * stride;
        uint8_t *scan_out_line = out + y * stride;
        if (y == 0) {
            /* seed the per-column sums over the first vertical window */
            for (int x = 0; x < stride; x += channels) {
                int col_sum[3] = {0};
                int col_sum_pow[3] = {0};
                for (int z = -radius; z <= radius; z++) {
                    uint8_t *sample = in + col_off[z] + x;
                    for (int c = 0; c < channels; ++c) {
                        col_sum[c] += sample[c];
                        col_sum_pow[c] += sample[c] * sample[c];
                    }
                }
                for (int c = 0; c < channels; ++c) {
                    col_val[x + c] = col_sum[c];
                    col_pow[x + c] = col_sum_pow[c];
                }
            }
        } else {
            /* slide the vertical window down one row: subtract the row that
               left the window, add the row that entered it */
            uint8_t *last_col = in + col_off[y - radius - 1];
            uint8_t *next_col = in + col_off[y + radius];
            for (int x = 0; x < stride; x += channels) {
                for (int c = 0; c < channels; ++c) {
                    col_val[x + c] -= last_col[x + c] - next_col[x + c];
                    col_pow[x + c] -= last_col[x + c] * last_col[x + c] -
                                      next_col[x + c] * next_col[x + c];
                }
            }
        }
        /* seed the horizontal window for x == 0 */
        int prev_sum[3] = {0}, prev_sum_pow[3] = {0};
        for (int z = -radius; z <= radius; z++) {
            int index = row_off[z];
            for (int c = 0; c < channels; ++c) {
                prev_sum[c] += col_val[index + c];
                prev_sum_pow[c] += col_pow[index + c];
            }
        }
        /* first pixel of the row */
        for (int c = 0; c < channels; ++c) {
            int mean = prev_sum[c] / window_size;
            int diff = mean - scan_in_line[c];
            int edge = CLAMP2BYTE(diff);
            /* blend between pixel and mean, weighted by edge strength */
            int masked_edge =
                (edge * scan_in_line[c] + (256 - edge) * mean) >> 8;
            int var = (prev_sum_pow[c] - mean * prev_sum[c]) / window_size;
            /* NOTE(review): `out` here shadows the `out` parameter */
            int out = masked_edge -
                      diff * var / (var + smooth_table[scan_in_line[c]]);
            scan_out_line[c] = CLAMP2BYTE(out);
        }
        scan_in_line += channels, scan_out_line += channels;
        /* remaining pixels: slide the horizontal window right */
        for (int x = 1; x < width; x++) {
            int last_row = row_off[x - radius - 1];
            int next_row = row_off[x + radius];
            for (int c = 0; c < channels; ++c) {
                prev_sum[c] -= col_val[last_row + c] - col_val[next_row + c];
                prev_sum_pow[c] = prev_sum_pow[c] - col_pow[last_row + c] +
                                  col_pow[next_row + c];
                int mean = prev_sum[c] / window_size;
                int diff = mean - scan_in_line[c];
                int edge = CLAMP2BYTE(diff);
                int masked_edge =
                    (edge * scan_in_line[c] + (256 - edge) * mean) >> 8;
                int var = (prev_sum_pow[c] - mean * prev_sum[c]) / window_size;
                int out = masked_edge -
                          diff * var / (var + smooth_table[scan_in_line[c]]);
                scan_out_line[c] = CLAMP2BYTE(out);
            }
            scan_in_line += channels, scan_out_line += channels;
        }
    }
    free(col_pow);
    free(col_val);
    free(row_pos);
    free(col_pos);
}
/* clang-format off */
/*
 * Single-plane variant of denoise(): reads channel `ch_idx` from the
 * de-interleaved `planes` array and writes the denoised values back into the
 * interleaved `out` buffer at the same channel position.  Same rolling
 * sum / sum-of-squares scheme as denoise().
 *
 * BUGFIX: the original evaluated row_off[x - radius - 1] before testing
 * x > 0, so at x == 0 it read one element before row_pos (out of bounds,
 * undefined behavior) even though the value went unused.  The border-offset
 * lookups are now performed only when x > 0, where the index is in range.
 * The shadowing local `int out` was also renamed.
 */
void denoise2(
    uint8_t *out,
    uint8_t **planes,
    int *smooth_table,
    int width,
    int height,
    int channels,
    int ch_idx,
    int radius
){
    uint8_t *in = planes[ch_idx];
    assert(in && out);
    assert(radius > 0);
    int window_size = (2*radius + 1) * (2*radius + 1);
    int *col_pow = calloc(width, sizeof(int));
    int *col_val = calloc(width, sizeof(int));
    int *row_pos = malloc((width + 2*radius) * sizeof(int));
    int *col_pos = malloc((height + 2*radius) * sizeof(int));
    int stride = width;
    compute_offset(row_pos, width, radius, radius, 1);
    compute_offset(col_pos, height, radius, radius, stride);
    int *row_off = row_pos + radius;
    int *col_off = col_pos + radius;
    /* seed per-column sums / sums-of-squares over the first vertical window */
    for (int x = 0; x < stride; x++) {
        for (int z = -radius; z <= radius; z++) {
            uint8_t sample = *(in + col_off[z] + x);
            col_val[x] += sample;
            col_pow[x] += sample * sample;
        }
    }
    for (int y = 0; y < height; y++) {
        uint8_t *scan_in_line = in + y*stride;
        uint8_t *scan_out_line = out + y*stride*channels;
        if (y > 0) {
            /* slide the vertical window down one row */
            uint8_t *last_col = in + col_off[y - radius - 1];
            uint8_t *next_col = in + col_off[y + radius];
            for (int x = 0; x < stride; x++) {
                col_val[x] -= last_col[x] - next_col[x];
                col_pow[x] -= last_col[x]*last_col[x] - next_col[x]*next_col[x];
            }
        }
        /* seed the horizontal window for x == 0 */
        int prev_sum = 0, prev_sum_pow = 0;
        for (int z = -radius; z <= radius; z++) {
            int index = row_off[z];
            prev_sum += col_val[index];
            prev_sum_pow += col_pow[index];
        }
        for (int x = 0; x < width; x++) {
            if (x > 0) {
                /* for x >= 1, x - radius - 1 >= -radius, so both lookups are
                   within row_pos */
                int last_row = row_off[x - radius - 1];
                int next_row = row_off[x + radius];
                prev_sum -= col_val[last_row] - col_val[next_row];
                prev_sum_pow = prev_sum_pow - col_pow[last_row] + col_pow[next_row];
            }
            int pix = *scan_in_line;
            int mean = prev_sum / window_size;
            int diff = mean - pix;
            int edge = CLAMP2BYTE(diff);
            /* blend between pixel and mean, weighted by edge strength */
            int masked_edge = (edge*pix + (256 - edge)*mean) >> 8;
            int var = (prev_sum_pow - mean*prev_sum) / window_size;
            int smoothed = masked_edge - diff*var / (var + smooth_table[pix]);
            scan_out_line[ch_idx] = CLAMP2BYTE(smoothed);
            scan_in_line++, scan_out_line += channels;
        }
    }
    free(col_pow);
    free(col_val);
    free(row_pos);
    free(col_pos);
}
/* Print a fatal error message to stderr and terminate the process. */
static void die(char *reason)
{
    /* no cleanup: the process is about to exit anyway */
    fprintf(stderr, "Fatal: %s\n", reason);
    exit(-1);
}
/* clang-format off */
/*
 * Microseconds elapsed between *st and *et (assumes *et is not earlier
 * than *st).
 *
 * Declared `static inline` rather than plain `inline`: in C99/C11 a plain
 * `inline` definition with no matching `extern` declaration provides no
 * out-of-line definition, so the program fails to link whenever the compiler
 * chooses not to inline a call (e.g. at -O0).
 */
static inline uint64_t time_diff(struct timeval *st, struct timeval *et)
{
    return (et->tv_sec - st->tv_sec)*1000000ULL + (et->tv_usec - st->tv_usec);
}
/* clang-format on */
/* clang-format on */
/*
 * Entry point: load an image, estimate the fraction of skin-tone pixels,
 * build a smoothing lookup table, denoise the image and write it out as JPEG.
 *
 * Fixes over the original:
 *  - `-i` is now effectively mandatory: previously a missing input name left
 *    ifn == NULL, which was passed to printf("%s") and stbi_load (UB/crash);
 *  - a zero skin rate no longer produces a division by zero (infinite
 *    radius) when deriving the denoise radius;
 *  - the per-plane buffers are allocation-checked;
 *  - elapsed times are printed with a format specifier matching the
 *    uint64_t returned by time_diff().
 */
int main(int argc, char *argv[])
{
    if (argc < 2) {
        printf("%s -i INPUT [-o OUTPUT] [-l LEVEL]\n", argv[0]);
        return -1;
    }
    char *ifn = NULL;
    char *ofn = "out.jpg";
    int smoothing_level = 10;
    /* clang-format off */
    int opt;
    while ((opt = getopt (argc, argv, "i:l:o:")) != -1){
        switch(opt){
        case 'i': {
            ifn = optarg;
            break;
        }
        case 'l': {
            smoothing_level = atoi(optarg);
            /* clamp to the supported 1..20 range */
            smoothing_level = (smoothing_level < 1 ? 1 : (smoothing_level > 20 ? 20 : smoothing_level));
            break;
        }
        case 'o': {
            ofn = optarg;
            break;
        }
        }
    }
    if (!ifn)
        die("No input file given (-i INPUT)");
    printf("ifn:%s ofn:%s level:%d\n", ifn, ofn, smoothing_level);
    /* clang-format on */
    struct timeval stime, etime;
    int width = 0, height = 0, channels = 0;
    uint8_t *in = stbi_load(ifn, &width, &height, &channels, 0);
    if (!in)
        die("Fail to load input file");
    assert(width > 0 && height > 0);
    assert(channels >= 3);
    int dimension = width * height;
    uint8_t *out = malloc(dimension * channels);
    if (!out)
        die("Out of memory");
    uint8_t *in_planes[4] = {NULL};
    for (int i = 0; i < channels; i++) {
        in_planes[i] = malloc(dimension);
        if (!in_planes[i])
            die("Out of memory");
    }
    /* Separation between skin and non-skin pixels */
    gettimeofday(&stime, NULL);
    float rate = detect(in, in_planes, width, height, channels) /
                 (float) dimension * 100;
    gettimeofday(&etime, NULL);
    printf("detect - %llu us\n", (unsigned long long) time_diff(&stime, &etime));
    /* guard the radius computation below against division by zero when no
     * skin-tone pixels were detected */
    if (rate <= 0.f)
        rate = 1.f;
    /* Perform edge detection, resulting in an edge map for further denoise */
    /* clang-format off */
    gettimeofday(&stime, NULL);
    int smooth_table[256] = {0};
    float ii = 0.f;
    for (int i = 0; i <= 255; i++, ii -= 1.) {
        smooth_table[i] = (
            expf(ii * (1.0f / (smoothing_level * 255.0f))) +
            (smoothing_level * (i + 1)) + 1
        ) / 2;
        smooth_table[i] = max(smooth_table[i], 1);  /* avoid zero divisor in denoise */
    }
#if OPT_PLANE
#pragma omp parallel for
    for(int i = 0; i < channels; i++)
        denoise2(out, in_planes, smooth_table, width, height, channels, i, min(width, height)/rate + 1);
#else
    denoise(out, in, smooth_table, width, height, channels, min(width, height) / rate + 1);
#endif
    gettimeofday(&etime, NULL);
    printf("denoise - %llu us\n", (unsigned long long) time_diff(&stime, &etime));
    /* clang-format on */
    if (!stbi_write_jpg(ofn, width, height, channels, out, 100))
        die("Fail to generate");
    for (int i = 0; i < channels; i++)
        free(in_planes[i]);
    free(out);
    free(in);
    return 0;
}
|
mandel_omp_nox_dynamic_256.c | /* Sequential Mandlebrot program */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#define X_RESN 1000 /* x resolution */
#define Y_RESN 1000 /* y resolution */
#define MAX_ITER (2000)
#define CHUNK 256
// ref: https://stackoverflow.com/questions/6749621/how-to-create-a-high-resolution-timer-in-linux-to-measure-program-performance
// call this function to start a nanosecond-resolution timer
// Begin a nanosecond-resolution measurement; pass the returned value to
// timer_end() to obtain the elapsed time.
struct timespec timer_start() {
    struct timespec t0;
    clock_gettime(CLOCK_MONOTONIC, &t0);
    return t0;
}
// call this function to end a timer, returning nanoseconds elapsed as a long
// Finish a measurement started with timer_start(); returns the elapsed
// time in nanoseconds as a long.
long timer_end(struct timespec start_time){
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    long elapsed_ns = (now.tv_sec - start_time.tv_sec) * (long)1e9
                    + (now.tv_nsec - start_time.tv_nsec);
    return elapsed_ns;
}
/* Complex number with double-precision real and imaginary parts. */
typedef struct complextype {
    double real, imag;
} Compl;
/*
 * Compute Mandelbrot iteration counts (ks) and escape-path lengths (ds) for
 * an X_RESN x Y_RESN grid in parallel with OpenMP dynamic scheduling, then
 * print the elapsed wall time in seconds.
 *
 * Fixes over the original:
 *  - ks and ds were never freed (leak at exit) and the mallocs unchecked;
 *  - the unused omp_get_num_threads() query and unused `temp` local were
 *    removed; the parallel region collapses to an equivalent combined
 *    `parallel for`.
 */
int main(int argc, char *argv[])
{
    struct timespec vartime = timer_start();
    /* Mandlebrot variables: per-pixel iteration count and path length */
    int *ks = (int *)malloc((X_RESN*Y_RESN) * sizeof(int));
    double *ds = (double *)malloc((X_RESN*Y_RESN) * sizeof(double));
    if (!ks || !ds) {
        fprintf(stderr, "out of memory\n");
        free(ks);
        free(ds);
        return 1;
    }
    /* Calculate and draw points */
#pragma omp parallel for schedule(dynamic, CHUNK)
    for (int it = 0; it < X_RESN*Y_RESN; it++)
    {
        int i = it / Y_RESN;
        int j = it % Y_RESN;
        // mandelbrot set is defined in the region of x = [-2, +2] and y = [-2, +2]
        double u = ((double)i - (X_RESN / 2.0)) / (X_RESN / 4.0);
        double v = ((double)j - (Y_RESN / 2.0)) / (Y_RESN / 4.0);
        Compl z, c, t;
        z.real = z.imag = 0.0;
        c.real = v;
        c.imag = u;
        int k = 0;
        double d = 0.0;
        double lengthsq;
        do
        { /* iterate for pixel color */
            t = z;
            z.imag = 2.0 * t.real * t.imag + c.imag;
            z.real = t.real * t.real - t.imag * t.imag + c.real;
            lengthsq = z.real * z.real + z.imag * z.imag;
            /* accumulate the distance travelled by z this iteration */
            d += pow(pow(z.imag - t.imag, 2.0) + pow(z.real - t.real, 2.0), 0.5);
            k++;
        } while (lengthsq < 4.0 && k < MAX_ITER);
        ks[it] = k;
        ds[it] = d;
    }
    long time_elapsed_nanos = timer_end(vartime);
    double elapsed = time_elapsed_nanos*0.000000001;
    printf("%lf\n", elapsed);
    free(ks);
    free(ds);
    /* Program Finished */
    return 0;
}
|
GB_unop__acos_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__acos_fc32_fc32)
// op(A') function: GB (_unop_tran__acos_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = cacosf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cacosf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = cacosf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOS || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Computes Cx [p] = cacosf (Ax [p]) for every entry.  When Ab is NULL the
// matrix is full/sparse and all anz entries are processed; otherwise Ab is
// the bitmap and entries with Ab [p] == 0 are skipped.
GrB_Info GB (_unop_apply__acos_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a plain parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = cacosf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = cacosf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// All of the work is done by the shared transpose template; the GB_CAST_OP
// macro defined above supplies the cacosf operator for this instantiation.
GrB_Info GB (_unop_tran__acos_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
THTensorMath.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/THTensorMath.c"
#else
#ifndef NAN
#define NAN (nan(NULL))
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#define TH_OMP_OVERHEAD_THRESHOLD 100000
#ifdef _OPENMP
#ifndef _WIN32
#define PRAGMA(P) _Pragma(#P)
#else
#define PRAGMA(P) __pragma(P)
#endif
#define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \
{ \
ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR); \
PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \
{ \
size_t num_threads = omp_get_num_threads(); \
size_t tid = omp_get_thread_num(); \
ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
ptrdiff_t TENSOR##_len = TH_TENSOR_end - TH_TENSOR_offset; \
TYPE *TENSOR##_data = THTensor_(data)(TENSOR) + TH_TENSOR_offset; \
CODE \
} \
}
#else
#define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \
{ \
TYPE *TENSOR##_data = THTensor_(data)(TENSOR); \
ptrdiff_t TENSOR##_len = THTensor_(nElement)(TENSOR); \
CODE \
}
#endif
#ifdef _OPENMP
#define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
{ \
ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \
PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \
{ \
size_t num_threads = omp_get_num_threads(); \
size_t tid = omp_get_thread_num(); \
ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \
TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \
CODE \
} \
}
#else
#define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
{ \
TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \
TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \
ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
CODE \
}
#endif
#ifdef _OPENMP
#define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
{ \
ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \
PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \
{ \
size_t num_threads = omp_get_num_threads(); \
size_t tid = omp_get_thread_num(); \
ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \
TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \
TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3) + TH_TENSOR_offset; \
CODE \
} \
}
#else
#define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
{ \
TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \
TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \
TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3); \
ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
CODE \
}
#endif
/* Set every element of r_ to `value` (`real` is the element type supplied by
   the TH generic-template expansion).  Contiguous or transposed tensors use
   the vectorized THVector fill over one flat buffer; otherwise the generic
   apply macro is used, filling stride-1 inner runs in bulk and falling back
   to per-element stores. */
void THTensor_(fill)(THTensor *r_, real value)
{
  if (THTensor_(isContiguous)(r_) || THTensor_(isTransposed)(r_)) {
    TH_TENSOR_APPLY_CONTIG(real, r_, THVector_(fill)(r__data, value, r__len););
  } else {
    TH_TENSOR_APPLY(real, r_,
      if (r__stride == 1) {
        /* innermost dimension is contiguous: fill the whole run at once and
           advance the apply cursor past it */
        THVector_(fill)(r__data, value, r__size);
        r__i = r__size;
        r__data += r__stride * r__size;
        break;
      } else {
        *r__data = value;
      }
      );
  }
}
/* Set every element of r_ to zero. */
void THTensor_(zero)(THTensor *r_)
{
  THTensor_(fill)(r_, 0);
}
/* tensor[mask == 1] = value.  The byte mask may contain only 0 or 1; any
   other value raises an error (after freeing the apply-macro counters, which
   THError does not clean up). */
void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value)
{
  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
    if (*mask_data > 1)
    {
      THFree(mask_counter);
      THFree(tensor_counter);
      THError("Mask tensor can take 0 and 1 values only");
    }
    else if (*mask_data == 1)
    {
      *tensor_data = value;
    });
}
/* Copy elements of src (read in contiguous order) into the positions of
   tensor where mask is 1.  Errors if tensor and mask differ in element
   count, if src runs out of elements before the mask's ones do, or if the
   mask contains values other than 0/1.  Error paths free the contiguous
   clone of src and the apply-macro counters before raising. */
void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src )
{
  THTensor *srct = THTensor_(newContiguous)(src);
  real *src_data = THTensor_(data)(srct);
  ptrdiff_t cntr = 0;                        /* elements consumed from src */
  ptrdiff_t nelem = THTensor_(nElement)(srct);
  if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask))
  {
    THTensor_(free)(srct);
    THError("Number of elements of destination tensor != Number of elements in mask");
  }
  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
    if (*mask_data > 1)
    {
      THTensor_(free)(srct);
      THFree(mask_counter);
      THFree(tensor_counter);
      THError("Mask tensor can take 0 and 1 values only");
    }
    else if (*mask_data == 1)
    {
      if (cntr == nelem)
      {
        THTensor_(free)(srct);
        THFree(mask_counter);
        THFree(tensor_counter);
        THError("Number of elements of src < number of ones in mask");
      }
      *tensor_data = *src_data;
      src_data++;
      cntr++;
    });
  THTensor_(free)(srct);
}
/* Resize tensor to a 1-D tensor holding the number of ones in mask, then
   gather the elements of src at the mask==1 positions into it, in iteration
   order.  Errors if the mask contains values other than 0/1. */
void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask)
{
  ptrdiff_t numel = THByteTensor_sumall(mask);  /* count of ones == output size */
  real *tensor_data;
#ifdef DEBUG
  THAssert(numel <= LONG_MAX);
#endif
  THTensor_(resize1d)(tensor,numel);
  tensor_data = THTensor_(data)(tensor);
  TH_TENSOR_APPLY2(real, src, unsigned char, mask,
    if (*mask_data > 1)
    {
      THFree(mask_counter);
      THFree(src_counter);
      THError("Mask tensor can take 0 and 1 values only");
    }
    else if (*mask_data == 1)
    {
      *tensor_data = *src_data;
      tensor_data++;
    });
}
// Finds non-zero elements of a tensor and returns their subscripts.
// `subscript` is resized to (numel x nDimension); each row holds the
// multi-dimensional index of one non-zero element, in iteration order.
void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor)
{
  ptrdiff_t numel = 0;
  long *subscript_data;
  long i = 0;     /* linear index of the current element during the 2nd pass */
  long dim;
  long div = 1;
#ifdef TH_REAL_IS_HALF
#define IS_NONZERO(val) ((val.x & 0x7fff) != 0)   /* half: ignore the sign bit */
#else
#define IS_NONZERO(val) ((val)!=0)
#endif
  /* First Pass to determine size of subscripts */
  TH_TENSOR_APPLY(real, tensor,
    if IS_NONZERO(*tensor_data) {
      ++numel;
    });
#ifdef DEBUG
  THAssert(numel <= LONG_MAX);
#endif
  THLongTensor_resize2d(subscript, numel, tensor->nDimension);
  /* Second pass populates subscripts */
  subscript_data = THLongTensor_data(subscript);
  TH_TENSOR_APPLY(real, tensor,
    if IS_NONZERO(*tensor_data) {
      /* decompose linear index i into per-dimension subscripts,
         innermost dimension first */
      div = 1;
      for (dim = tensor->nDimension - 1; dim >= 0; dim--) {
        *(subscript_data + dim) = (i/div) % tensor->size[dim];
        div *= tensor->size[dim];
      }
      subscript_data += tensor->nDimension;
    }
    ++i;);
}
/* tensor = src selected along dimension `dim` by the 1-D `index` tensor
   (indices are TH_INDEX_BASE-based).  tensor is resized so that
   size[dim] == #indices.  A fast path handles dim == 0 with contiguous
   src and tensor; otherwise slices are copied one index at a time. */
void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
  ptrdiff_t i, numel;
  THLongStorage *newSize;
  THTensor *tSlice, *sSlice;
  long *index_data;
  real *tensor_data, *src_data;
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
  THArgCheck(src->nDimension > 0,2,"Source tensor is empty");
  numel = THLongTensor_nElement(index);
  /* output shape = src shape with size[dim] replaced by numel */
  newSize = THLongStorage_newWithSize(src->nDimension);
  THLongStorage_rawCopy(newSize,src->size);
#ifdef DEBUG
  THAssert(numel <= LONG_MAX);
#endif
  newSize->data[dim] = numel;
  THTensor_(resize)(tensor,newSize,NULL);
  THLongStorage_free(newSize);
  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);
  if (dim == 0 && THTensor_(isContiguous)(src) && THTensor_(isContiguous)(tensor))
  {
    /* fast path: rows are contiguous blocks that can be memcpy'd */
    tensor_data = THTensor_(data)(tensor);
    src_data = THTensor_(data)(src);
    ptrdiff_t rowsize = THTensor_(nElement)(src) / src->size[0];
    // check that the indices are within range
    long max = src->size[0] - 1 + TH_INDEX_BASE;
    for (i=0; i<numel; i++) {
      if (index_data[i] < TH_INDEX_BASE || index_data[i] > max) {
        THLongTensor_free(index);
        THError("index out of range");
      }
    }
    if (src->nDimension == 1) {
      #pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<numel; i++)
        tensor_data[i] = src_data[index_data[i] - TH_INDEX_BASE];
    } else {
      #pragma omp parallel for if(numel*rowsize > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<numel; i++)
        memcpy(tensor_data + i*rowsize, src_data + (index_data[i] - TH_INDEX_BASE)*rowsize, rowsize*sizeof(real));
    }
  }
  else if (src->nDimension == 1)
  {
    for (i=0; i<numel; i++)
      THTensor_(set1d)(tensor,i,THTensor_(get1d)(src,index_data[i] - TH_INDEX_BASE));
  }
  else
  {
    /* general path: copy one selected slice at a time */
    for (i=0; i<numel; i++)
    {
      tSlice = THTensor_(new)();
      sSlice = THTensor_(new)();
      THTensor_(select)(tSlice, tensor, dim, i);
      THTensor_(select)(sSlice, src, dim, index_data[i] - TH_INDEX_BASE);
      THTensor_(copy)(tSlice, sSlice);
      THTensor_(free)(tSlice);
      THTensor_(free)(sSlice);
    }
  }
  THLongTensor_free(index);
}
/* Copy the slices of src (taken in order along `dim`) into the slices of
   tensor selected by `index` along the same dimension.  The number of
   indices must equal src->size[dim].  No range check is performed on the
   index values here (select() handles them). */
void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
  ptrdiff_t i, numel;
  THTensor *tSlice, *sSlice;
  long *index_data;
  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < src->nDimension, 4, "Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
  THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)");
  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);
  if (tensor->nDimension > 1 )
  {
    /* slice views are reused across iterations; select() rebinds them */
    tSlice = THTensor_(new)();
    sSlice = THTensor_(new)();
    for (i=0; i<numel; i++)
    {
      THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE);
      THTensor_(select)(sSlice, src, dim, i);
      THTensor_(copy)(tSlice, sSlice);
    }
    THTensor_(free)(tSlice);
    THTensor_(free)(sSlice);
  }
  else
  {
    /* 1-D: element-wise assignment */
    for (i=0; i<numel; i++)
    {
      THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, THTensor_(get1d)(src,i));
    }
  }
  THLongTensor_free(index);
}
/* Like indexCopy, but accumulates: the slices of src are added (cadd with
   scale 1.0) into the slices of tensor selected by `index` along `dim`.
   The number of indices must equal src->size[dim]. */
void THTensor_(indexAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
  ptrdiff_t i, numel;
  THTensor *tSlice, *sSlice;
  long *index_data;
  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
  THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)");
  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);
  if (tensor->nDimension > 1)
  {
    tSlice = THTensor_(new)();
    sSlice = THTensor_(new)();
    for (i=0; i<numel; i++)
    {
      THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE);
      THTensor_(select)(sSlice, src, dim, i);
      /* tSlice += 1.0 * sSlice */
      THTensor_(cadd)(tSlice, tSlice, 1.0, sSlice);
    }
    THTensor_(free)(tSlice);
    THTensor_(free)(sSlice);
  }
  else
  {
    /* 1-D: scalar accumulate per index */
    for (i=0; i<numel; i++)
    {
      THTensor_(set1d)(tensor,
              index_data[i] - TH_INDEX_BASE,
              THTensor_(get1d)(src,i) + THTensor_(get1d)(tensor,index_data[i] - TH_INDEX_BASE));
    }
  }
  THLongTensor_free(index);
}
/* Fill the slices of tensor selected by `index` along dimension `dim` with
   the scalar `val`. */
void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
  ptrdiff_t i, numel;
  THTensor *tSlice;
  long *index_data;
  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < tensor->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE);
  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);
  for (i=0; i<numel; i++)
  {
    if (tensor->nDimension > 1)
    {
      /* fill one selected slice */
      tSlice = THTensor_(new)();
      THTensor_(select)(tSlice, tensor,dim,index_data[i] - TH_INDEX_BASE);
      THTensor_(fill)(tSlice, val);
      THTensor_(free)(tSlice);
    }
    else
    {
      THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, val);
    }
  }
  THLongTensor_free(index);
}
/* tensor[...][i][...] = src[...][index[...][i][...]][...] along dimension
   `dim`: for every position, gather the src element addressed by the
   corresponding entry of `index`.  tensor, src, and index must all have the
   same number of dimensions; indices are TH_INDEX_BASE-based and range
   checked against src's size along `dim`. */
void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
  long elems_per_row, i, idx;
  THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 2,
             "Input tensor must have same dimensions as output tensor");
  THArgCheck(dim < THTensor_(nDimension)(tensor), 3, "Index dimension is out of bounds");
  THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(src), 4,
             "Index tensor must have same dimensions as input tensor");
  elems_per_row = THLongTensor_size(index, dim);
  TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim,
                       for (i = 0; i < elems_per_row; ++i)
                       {
                         idx = *(index_data + i*index_stride);
                         if (idx < TH_INDEX_BASE || idx >= src_size + TH_INDEX_BASE)
                         {
                           THFree(TH_TENSOR_DIM_APPLY_counter);
                           THError("Invalid index in gather");
                         }
                         *(tensor_data + i*tensor_stride) = src_data[(idx - TH_INDEX_BASE) * src_stride];
                       })
}
/* Scatter along dimension `dim` (inverse of gather):
   tensor[...][index[...][i][...]][...] = src[...][i][...].
   index, src and tensor must have the same dimensionality; each index is
   validated against tensor's extent along `dim` (TH_INDEX_BASE-based).
   Duplicate indices overwrite each other in iteration order. */
void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
long elems_per_row, i, idx;
THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
"Index tensor must have same dimensions as output tensor");
THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4,
"Input tensor must have same dimensions as output tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
{
THFree(TH_TENSOR_DIM_APPLY_counter);
THError("Invalid index in scatter");
}
tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = *(src_data + i*src_stride);
})
}
/* Scatter-add along dimension `dim`:
   tensor[...][index[...][i][...]][...] += src[...][i][...].
   Same shape/bounds contract as scatter(); duplicate indices accumulate
   instead of overwriting. */
void THTensor_(scatterAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
long elems_per_row, i, idx;
THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
"Index tensor must have same dimensions as output tensor");
THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4,
"Input tensor must have same dimensions as output tensor");
elems_per_row = THLongTensor_size(index, dim);
TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim,
for (i = 0; i < elems_per_row; ++i)
{
idx = *(index_data + i*index_stride);
if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
{
THFree(TH_TENSOR_DIM_APPLY_counter);
THError("Invalid index in scatterAdd");
}
tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] += *(src_data + i*src_stride);
})
}
/* Scatter-fill along dimension `dim`:
   tensor[...][index[...][i][...]][...] = val.
   index must have the same dimensionality as tensor; each index is
   validated against tensor's extent along `dim` (TH_INDEX_BASE-based).
   On an out-of-range index the DIM_APPLY counter is released before
   erroring so no memory leaks. */
void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
  long elems_per_row, i, idx;

  THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds");
  THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3,
             "Index tensor must have same dimensions as output tensor");

  elems_per_row = THLongTensor_size(index, dim);

  TH_TENSOR_DIM_APPLY2(real, tensor, long, index, dim,
                       for (i = 0; i < elems_per_row; ++i)
                       {
                         idx = *(index_data + i*index_stride);
                         if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE)
                         {
                           THFree(TH_TENSOR_DIM_APPLY_counter);
                           /* fix: report the actual function (was "Invalid index in scatter") */
                           THError("Invalid index in scatterFill");
                         }
                         tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = val;
                       })
}
/* Dot product of all elements of tensor and src (shapes must broadcastably
   agree element-count-wise; APPLY2 iterates both in lockstep).
   The "trick": inside TH_TENSOR_APPLY2 we grab the longest run that is
   contiguous-with-stride in BOTH tensors, hand it to BLAS dot, then advance
   the macro's own counters/data pointers by hand and `break` back into the
   macro so it recomputes the next run. Depends on APPLY2's internal
   variables (tensor_i, tensor_size, tensor_stride, ...). */
accreal THTensor_(dot)(THTensor *tensor, THTensor *src)
{
accreal sum = 0;
/* we use a trick here. careful with that. */
TH_TENSOR_APPLY2(real, tensor, real, src,
long sz = (tensor_size-tensor_i < src_size-src_i ? tensor_size-tensor_i : src_size-src_i);
sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride);
tensor_i += sz;
src_i += sz;
tensor_data += sz*tensor_stride;
src_data += sz*src_stride;
break;);
return sum;
}
/* th_isnan(val): non-zero iff val is NaN. Only floating-point reals can be
   NaN, so for integer real types it is the constant 0. */
#undef th_isnan
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define th_isnan(val) \
(isnan(val))
#else
#define th_isnan(val) (0)
#endif
/* th_isnan_break(val): break out of the enclosing loop when val is NaN
   (used by min/max reductions so NaN wins immediately); expands to nothing
   for integer real types. */
#undef th_isnan_break
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define th_isnan_break(val) \
if (isnan(val)) break;
#else
#define th_isnan_break(val)
#endif
/* Minimum over all elements. NaN-propagating: the negated comparison
   !(value >= theMin) is true for NaN, so a NaN replaces the current min and
   th_isnan_break then stops the scan early. Requires a non-empty tensor. */
real THTensor_(minall)(THTensor *tensor)
{
real theMin;
real value;
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
theMin = THTensor_(data)(tensor)[0];
TH_TENSOR_APPLY(real, tensor,
value = *tensor_data;
/* This is not the same as value<theMin in the case of NaNs */
if(!(value >= theMin))
{
theMin = value;
th_isnan_break(value)
});
return theMin;
}
/* Maximum over all elements. NaN-propagating, mirror image of minall:
   !(value <= theMax) is true for NaN, so NaN wins and terminates the scan.
   Requires a non-empty tensor. */
real THTensor_(maxall)(THTensor *tensor)
{
real theMax;
real value;
THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
theMax = THTensor_(data)(tensor)[0];
TH_TENSOR_APPLY(real, tensor,
value = *tensor_data;
/* This is not the same as value>theMax in the case of NaNs */
if(!(value <= theMax))
{
theMax = value;
th_isnan_break(value)
});
return theMax;
}
static void THTensor_(quickselectnoidx)(real *arr, long k, long elements, long stride);
/* Median of all elements (the LOWER median for an even element count —
   no averaging is performed). The input is cloned to a contiguous scratch
   buffer first, so the caller's tensor is left untouched. */
real THTensor_(medianall)(THTensor *tensor)
{
  THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");

  ptrdiff_t n = THTensor_(nElement)(tensor);
  long mid = (n - 1) >> 1;                 /* index of the lower median */

  THTensor *scratch = THTensor_(newClone)(tensor);
  real *scratch_data = THTensor_(data)(scratch);

  /* partial sort: puts the mid-th smallest element at position mid */
  THTensor_(quickselectnoidx)(scratch_data, mid, n, 1);
  real result = scratch_data[mid];

  THTensor_(free)(scratch);
  return result;
}
/* Sum of every element; accumulated in accreal (the wider accumulator
   type) to limit overflow/rounding. Empty tensors yield 0. */
accreal THTensor_(sumall)(THTensor *tensor)
{
  accreal total = 0;
  TH_TENSOR_APPLY(real, tensor, total += *tensor_data;);
  return total;
}
/* Product of every element; accumulated in accreal. Empty tensors yield
   the multiplicative identity 1. */
accreal THTensor_(prodall)(THTensor *tensor)
{
  accreal product = 1;
  TH_TENSOR_APPLY(real, tensor, product *= *tensor_data;);
  return product;
}
/* r_ = t + value (elementwise scalar add). r_ is resized to t's shape.
   Uses the vectorized THVector path when both tensors are contiguous with
   matching element counts, else the generic strided apply. */
void THTensor_(add)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(adds)(r__data, t_data, value, r__len););
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;);
}
}
/* r_ = t - value, expressed as an add of the negated scalar so both scalar
   ops share one implementation. */
void THTensor_(sub)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(add)(r_, t, -value);
}
/* r_ = t * value (elementwise scalar multiply). r_ is resized to t; fast
   vectorized path for contiguous tensors, strided fallback otherwise. */
void THTensor_(mul)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(muls)(r__data, t_data, value, r__len););
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;);
}
}
/* r_ = t / value (elementwise scalar divide). No zero check: for float
   types division by 0 yields inf/NaN; for integer types it is undefined
   behavior, as in plain C. */
void THTensor_(div)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(divs)(r__data, t_data, value, r__len););
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;);
}
}
/* r_ = t << value. Float/double emulate the shift as multiplication by
   2^value; half is unsupported. Integer types shift the bit pattern — the
   value is first cast to the unsigned counterpart of `real` (except byte,
   already unsigned) so the shift is well defined on negative inputs.
   NOTE(review): shifting by >= bit-width of real is still UB — assumed
   callers pass sane shift amounts; confirm at the API boundary. */
void THTensor_(lshift)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT)
return THTensor_(mul)(r_, t, powf(2, value));
#elif defined(TH_REAL_IS_DOUBLE)
return THTensor_(mul)(r_, t, pow(2, value));
#elif defined(TH_REAL_IS_HALF)
return THError("lshift is not supported for torch.HalfTensor");
#else
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) &&
THTensor_(isContiguous)(t) &&
THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_BYTE)
rp[i] = ((real) tp[i]) << value;
#else
rp[i] = ((unsigned real) tp[i]) << value;
#endif
}
} else {
#if defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) << value););
#else
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((unsigned real) *t_data) << value););
#endif
}
#endif
}
/* r_ = t >> value, mirror of lshift. Float/double emulate via division by
   2^value; half is unsupported. Integer types shift the bit pattern after
   casting to the unsigned counterpart of `real` (logical, not arithmetic,
   shift for signed inputs). */
void THTensor_(rshift)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT)
return THTensor_(div)(r_, t, powf(2, value));
#elif defined(TH_REAL_IS_DOUBLE)
return THTensor_(div)(r_, t, pow(2, value));
#elif defined(TH_REAL_IS_HALF)
return THError("rshift is not supported for torch.HalfTensor");
#else
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) &&
THTensor_(isContiguous)(t) &&
THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_BYTE)
rp[i] = ((real) tp[i]) >> value;
#else
rp[i] = ((unsigned real) tp[i]) >> value;
#endif
}
} else {
#if defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) >> value););
#else
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((unsigned real) *t_data) >> value););
#endif
}
#endif
}
/* r_ = fmod(t, value): C-style remainder with the sign of the dividend
   (truncated division). Float/double use fmod(); integers use %.
   Parallelized over a flat loop when all tensors are contiguous. */
void THTensor_(fmod)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
ptrdiff_t sz = THTensor_(nElement)(t);
ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
rp[i] = fmod(tp[i], value);
#else
rp[i] = tp[i] % value;
#endif
}
} else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = fmod(*t_data, value););
#else
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data % value););
#endif
}
}
/* r_ = remainder(t, value): mathematical modulo with the sign of the
   DIVISOR (floored division), unlike fmod which follows the dividend.
   Float/double: t - value*floor(t/value), NaN when value == 0.
   Integers: % followed by a sign correction; division by 0 is UB as in C. */
void THTensor_(remainder)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
ptrdiff_t sz = THTensor_(nElement)(t);
ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
rp[i] = (value == 0)? NAN : tp[i] - value * floor(tp[i] / value);
#else
// There is no NAN for integers
rp[i] = tp[i] % value;
if (rp[i] * value < 0)
rp[i] += value;
#endif
}
} else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (value == 0)? NAN : *t_data - value * floor(*t_data / value););
#else
// There is no NAN for integers
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data % value;
if (*r__data * value < 0) *r__data += value;);
#endif
}
}
/* r_ = t & value (bitwise AND with a scalar); integer tensor types only,
   errors out for float/double/half. Contiguous path is OpenMP-parallel. */
void THTensor_(bitand)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
return THError("bitand is only supported for integer type tensors");
#else
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) &&
THTensor_(isContiguous)(t) &&
THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
for (i=0; i<sz; i++) {
rp[i] = tp[i] & value;
}
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data & value;);
}
#endif
}
/* r_ = t | value (bitwise OR with a scalar); integer tensor types only. */
void THTensor_(bitor)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
return THError("bitor is only supported for integer type tensors");
#else
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) &&
THTensor_(isContiguous)(t) &&
THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
for (i=0; i<sz; i++) {
rp[i] = tp[i] | value;
}
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data | value;);
}
#endif
}
/* r_ = t ^ value (bitwise XOR with a scalar); integer tensor types only. */
void THTensor_(bitxor)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
return THError("bitxor is only supported for integer type tensors");
#else
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) &&
THTensor_(isContiguous)(t) &&
THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
long sz = THTensor_(nElement)(t);
long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
for (i=0; i<sz; i++) {
rp[i] = tp[i] ^ value;
}
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data ^ value;);
}
#endif
}
/* r_ = clamp(t, min_value, max_value): each element is limited to the
   closed interval [min_value, max_value]. min is applied first, so if
   min_value > max_value the result is min_value. */
void THTensor_(clamp)(THTensor *r_, THTensor *t, real min_value, real max_value)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
/* real t_val; */
ptrdiff_t sz = THTensor_(nElement)(t);
ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = (tp[i] < min_value) ? min_value : (tp[i] > max_value ? max_value : tp[i]);
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data););
}
}
/* r_ = t + value * src (axpy-style elementwise add). r_ is resized to t.
   Contiguous cases: when the output aliases t (in-place) use BLAS axpy,
   otherwise the vectorized cadd kernel; strided tensors fall back to the
   generic triple apply. */
void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
if(r_ == t) {
THBlas_(axpy)(THTensor_(nElement)(t), value, THTensor_(data)(src), 1, THTensor_(data)(r_), 1);
} else {
TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cadd)(r__data, t_data, src_data, value, r__len););
}
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;);
}
}
/* r_ = t - value * src, delegated to cadd with the scalar negated. */
void THTensor_(csub)(THTensor *r_, THTensor *t, real value,THTensor *src)
{
  THTensor_(cadd)(r_, t, -value, src);
}
/* r_ = t * src (elementwise, Hadamard product). Vectorized contiguous
   path, strided fallback otherwise. */
void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cmul)(r__data, t_data, src_data, r__len););
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;);
}
}
/* r_ = t ^ src (elementwise power), computed with C pow() — integer real
   types therefore round-trip through double. */
void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
ptrdiff_t sz = THTensor_(nElement)(t);
ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = pow(tp[i], sp[i]);
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = pow(*t_data, *src_data););
}
}
/* r_ = t / src (elementwise). No zero checks — float yields inf/NaN,
   integer division by zero is UB, as in plain C. */
void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cdiv)(r__data, t_data, src_data, r__len););
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;);
}
}
/* r_ = t << src (elementwise shift by a tensor of amounts).
   Float/double emulate with t * 2^src; half is rejected; integer types
   shift via the unsigned counterpart of `real` (byte is already unsigned). */
void THTensor_(clshift)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_HALF)
return THError("clshift is not supported for torch.HalfTensor");
#endif
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) &&
THTensor_(isContiguous)(t) &&
THTensor_(isContiguous)(src) &&
THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
ptrdiff_t sz = THTensor_(nElement)(t);
ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT)
rp[i] = tp[i] * powf(2, sp[i]);
#elif defined(TH_REAL_IS_DOUBLE)
rp[i] = tp[i] * pow(2, sp[i]);
#elif defined(TH_REAL_IS_BYTE)
rp[i] = ((real) tp[i]) << sp[i];
#else
rp[i] = ((unsigned real) tp[i]) << sp[i];
#endif
}
} else {
#if defined(TH_REAL_IS_FLOAT)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) << *src_data;);
#else
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((unsigned real)*t_data) << *src_data;);
#endif
}
}
/* r_ = t >> src (elementwise shift by a tensor of amounts), mirror of
   clshift: float/double emulate with t / 2^src, half is rejected, integer
   types do a logical shift through the unsigned counterpart of `real`. */
void THTensor_(crshift)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_HALF)
return THError("crshift is not supported for torch.HalfTensor");
#endif
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) &&
THTensor_(isContiguous)(t) &&
THTensor_(isContiguous)(src) &&
THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
ptrdiff_t sz = THTensor_(nElement)(t);
ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT)
rp[i] = tp[i] / powf(2, sp[i]);
#elif defined(TH_REAL_IS_DOUBLE)
rp[i] = tp[i] / pow(2, sp[i]);
#elif defined(TH_REAL_IS_BYTE)
rp[i] = ((real) tp[i]) >> sp[i];
#else
rp[i] = ((unsigned real) tp[i]) >> sp[i];
#endif
}
} else {
#if defined(TH_REAL_IS_FLOAT)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) >> *src_data;);
#else
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((unsigned real)*t_data) >> *src_data;);
#endif
}
}
/* r_ = fmod(t, src) elementwise: C-style remainder keeping the dividend's
   sign. Float/double via fmod(); integers via %. */
void THTensor_(cfmod)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
ptrdiff_t sz = THTensor_(nElement)(t);
ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
rp[i] = fmod(tp[i], sp[i]);
#else
rp[i] = tp[i] % sp[i];
#endif
}
} else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = fmod(*t_data, *src_data););
#else
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*t_data % *src_data););
#endif
}
}
/* r_ = remainder(t, src) elementwise: floored-division modulo, result has
   the sign of the divisor. Float/double: t - src*floor(t/src), NaN where
   src == 0. Integers: % with a sign fix-up; division by 0 is UB. */
void THTensor_(cremainder)(THTensor *r_, THTensor *t, THTensor *src)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
ptrdiff_t sz = THTensor_(nElement)(t);
ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
rp[i] = (sp[i] == 0)? NAN : tp[i] - sp[i] * floor(tp[i] / sp[i]);
#else
// There is no NAN for integers
rp[i] = tp[i] % sp[i];
if (rp[i] * sp[i] < 0)
rp[i] += sp[i];
#endif
}
} else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*src_data == 0)? NAN : *t_data - *src_data * floor(*t_data / *src_data););
#else
// There is no NAN for integers
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data % *src_data;
if (*r__data * *src_data < 0) *r__data += *src_data;);
#endif
}
}
/* r_ = t & src (elementwise bitwise AND); integer tensor types only. */
void THTensor_(cbitand)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
return THError("cbitand is only supported for integer type tensors");
#else
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) &&
THTensor_(isContiguous)(t) &&
THTensor_(isContiguous)(src) &&
THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
ptrdiff_t sz = THTensor_(nElement)(t);
ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++) {
rp[i] = tp[i] & sp[i];
}
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data & *src_data;);
}
#endif
}
/* r_ = t | src (elementwise bitwise OR); integer tensor types only. */
void THTensor_(cbitor)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
return THError("cbitor is only supported for integer type tensors");
#else
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) &&
THTensor_(isContiguous)(t) &&
THTensor_(isContiguous)(src) &&
THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
ptrdiff_t sz = THTensor_(nElement)(t);
ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++) {
rp[i] = tp[i] | sp[i];
}
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data | *src_data;);
}
#endif
}
/* r_ = t ^ src (elementwise bitwise XOR); integer tensor types only. */
void THTensor_(cbitxor)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
return THError("cbitxor is only supported for integer type tensors");
#else
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) &&
THTensor_(isContiguous)(t) &&
THTensor_(isContiguous)(src) &&
THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
real *tp = THTensor_(data)(t);
real *sp = THTensor_(data)(src);
real *rp = THTensor_(data)(r_);
ptrdiff_t sz = THTensor_(nElement)(t);
ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++) {
rp[i] = tp[i] ^ sp[i];
}
} else {
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data ^ *src_data;);
}
#endif
}
/* r_ = value ^ t (scalar base raised to each tensor element), via pow().
   Note the argument order differs from the other scalar ops: the scalar
   comes second in the signature but is the base of the power. */
void THTensor_(tpow)(THTensor *r_, real value, THTensor *t)
{
THTensor_(resizeAs)(r_, t);
if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
real *tp = THTensor_(data)(t);
real *rp = THTensor_(data)(r_);
ptrdiff_t sz = THTensor_(nElement)(t);
ptrdiff_t i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
for (i=0; i<sz; i++)
rp[i] = pow(value, tp[i]);
} else {
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = pow(value, *t_data););
}
}
/* r_ = t + value * src1 * src2 (elementwise). When r_ != t, t is first
   copied into r_ and the product term is accumulated in place. */
void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;);
}
/* r_ = t + value * (src1 / src2) (elementwise); same accumulate-in-place
   structure as addcmul. No zero check on src2 (C division semantics). */
void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
}
/* r_ = beta * t + alpha * mat * vec (matrix-vector product via BLAS gemv).
   mat must be 2-D, vec and t 1-D with matching sizes; r_ is made a copy of
   t when they differ. gemv requires a unit stride in one matrix dimension:
   column-major mat uses 'n', row-major uses 't' (swapping the size args),
   and a fully strided mat is copied to a contiguous temporary first. */
void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec)
{
if( (mat->nDimension != 2) || (vec->nDimension != 1) )
THError("matrix and vector expected, got %dD, %dD",
mat->nDimension, vec->nDimension);
if( mat->size[1] != vec->size[0] ) {
THDescBuff bm = THTensor_(sizeDesc)(mat);
THDescBuff bv = THTensor_(sizeDesc)(vec);
THError("size mismatch, %s, %s", bm.str, bv.str);
}
if(t->nDimension != 1)
THError("vector expected, got t: %dD", t->nDimension);
if(t->size[0] != mat->size[0]) {
THDescBuff bt = THTensor_(sizeDesc)(t);
THDescBuff bm = THTensor_(sizeDesc)(mat);
THError("size mismatch, t: %s, mat: %s", bt.str, bm.str);
}
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
/* column-major layout: use gemv directly */
if(mat->stride[0] == 1)
{
THBlas_(gemv)('n', mat->size[0], mat->size[1],
alpha, THTensor_(data)(mat), mat->stride[1],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
}
/* row-major layout: gemv on the transpose */
else if(mat->stride[1] == 1)
{
THBlas_(gemv)('t',  mat->size[1], mat->size[0],
alpha, THTensor_(data)(mat), mat->stride[0],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
}
/* arbitrary strides: materialize a contiguous copy, then transpose-gemv */
else
{
THTensor *cmat = THTensor_(newContiguous)(mat);
THBlas_(gemv)('t',  mat->size[1], mat->size[0],
alpha, THTensor_(data)(cmat), cmat->stride[0],
THTensor_(data)(vec), vec->stride[0],
beta, THTensor_(data)(r_), r_->stride[0]);
THTensor_(free)(cmat);
}
}
/* r_[i][j] = gain * ||m1[i] - m2[j]||^2: pairwise squared Euclidean
   distances between the rows of m1 (N1 rows) and m2 (N2 rows), each row
   flattened to a vector of length dim. Outer loop is OpenMP-parallel.
   NOTE(review): when an input is already contiguous, newContiguous returns
   the same (retained) tensor, so the resize2d below appears to alter the
   caller's tensor metadata — element count is unchanged, but verify this
   aliasing is intended. */
void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain)
{
long N1 = m1->size[0];
long N2 = m2->size[0];
long dim;
real *m1_p;
real *m2_p;
real *r_p;
long i;
THTensor_(resize2d)(r_, N1, N2);
m1 = THTensor_(newContiguous)(m1);
m2 = THTensor_(newContiguous)(m2);
THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1);
THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2);
dim = m1->size[1];
THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim");
m1_p = THTensor_(data)(m1);
m2_p = THTensor_(data)(m2);
r_p = THTensor_(data)(r_);
#pragma omp parallel for private(i)
for (i=0; i<N1; i++) {
long j,k;
for (j=0; j<N2; j++) {
real sum = 0;
for (k=0; k<dim; k++) {
real term = m1_p[ i*dim + k ] - m2_p[ j*dim + k ];
sum += term*term;
}
r_p[ i*N2 + j ] = gain * sum;
}
}
THTensor_(free)(m1);
THTensor_(free)(m2);
}
/* r_ = beta * t + alpha * m1 * m2 (matrix-matrix product via BLAS gemm).
   All of t, m1, m2 must be 2-D with conforming sizes. BLAS wants a
   column-major output, so: if r_ is column-major use it directly ('n');
   if row-major, compute the transposed product by swapping m1/m2 ('t');
   otherwise gemm into a fresh column-major clone and copy back at the end.
   Each operand similarly gets 'n'/'t'/contiguous-copy treatment so gemm
   always sees unit stride in one dimension. The copy of t into r_ is
   skipped when beta == 0 since gemm then ignores the initial contents. */
void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *m1, THTensor *m2)
{
char transpose_r, transpose_m1, transpose_m2;
THTensor *r__, *m1_, *m2_;
if( (m1->nDimension != 2) || (m2->nDimension != 2))
THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension);
if(m1->size[1] != m2->size[0]) {
THDescBuff bm1 = THTensor_(sizeDesc)(m1);
THDescBuff bm2 = THTensor_(sizeDesc)(m2);
THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str);
}
if( t->nDimension != 2 )
THError("matrix expected, got %dD tensor for t", t->nDimension);
if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) {
THDescBuff bt = THTensor_(sizeDesc)(t);
THDescBuff bm1 = THTensor_(sizeDesc)(m1);
THDescBuff bm2 = THTensor_(sizeDesc)(m2);
THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str);
}
if(t != r_)
{
THTensor_(resizeAs)(r_, t);
if (beta != 0.0) {
THTensor_(copy)(r_, t);
}
}
/* r_ */
if(r_->stride[0] == 1 &&
r_->stride[1] != 0)
{
transpose_r = 'n';
r__ = r_;
}
else if(r_->stride[1] == 1 &&
r_->stride[0] != 0)
{
/* row-major output: compute (m2^T * m1^T)^T by swapping the operands */
THTensor *swap = m2;
m2 = m1;
m1 = swap;
transpose_r = 't';
r__ = r_;
}
else
{
/* strided output: gemm into a column-major clone, copy back at the end */
transpose_r = 'n';
THTensor *transp_r_ = THTensor_(newTranspose)(r_, 0, 1);
r__ = THTensor_(newClone)(transp_r_);
THTensor_(free)(transp_r_);
THTensor_(transpose)(r__, NULL, 0, 1);
}
/* m1 */
if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m1 = 'n';
m1_ = m1;
}
else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m1 = 't';
m1_ = m1;
}
else
{
transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
m1_ = THTensor_(newContiguous)(m1);
}
/* m2 */
if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m2 = 'n';
m2_ = m2;
}
else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m2 = 't';
m2_ = m2;
}
else
{
transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
m2_ = THTensor_(newContiguous)(m2);
}
#pragma omp critical(blasgemm)
/* do the operation */
THBlas_(gemm)(transpose_m1,
transpose_m2,
r__->size[(transpose_r == 'n' ? 0 : 1)],
r__->size[(transpose_r == 'n' ? 1 : 0)],
m1_->size[(transpose_r == 'n' ? 1 : 0)],
alpha,
THTensor_(data)(m1_),
(transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
THTensor_(data)(m2_),
(transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
beta,
THTensor_(data)(r__),
r__->stride[(transpose_r == 'n' ? 1 : 0)]);
/* free intermediate variables */
if(m1_ != m1)
THTensor_(free)(m1_);
if(m2_ != m2)
THTensor_(free)(m2_);
if(r__ != r_)
THTensor_(freeCopyTo)(r__, r_);
}
/* r_ = beta * t + alpha * vec1 (outer) vec2, via BLAS ger (rank-1 update).
   ger has no beta parameter, so r_ is pre-scaled: zeroed when beta == 0,
   multiplied by beta when beta != 1. Column-major r_ calls ger directly;
   row-major swaps vec1/vec2 (outer product transpose); strided r_ goes
   through a contiguous clone that is copied back. */
void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2)
{
if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
THError("vector and vector expected, got %dD, %dD tensors",
vec1->nDimension, vec2->nDimension);
if(t->nDimension != 2)
THError("expected matrix, got %dD tensor for t", t->nDimension);
if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) {
THDescBuff bt  = THTensor_(sizeDesc)(t);
THDescBuff bv1 = THTensor_(sizeDesc)(vec1);
THDescBuff bv2 = THTensor_(sizeDesc)(vec2);
THError("size mismatch, t: %s, vec1: %s, vec2: %s", bt.str, bv1.str, bv2.str);
}
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
if(beta == 0) {
THTensor_(zero)(r_);
}
else if(beta != 1)
THTensor_(mul)(r_, r_, beta);
if(r_->stride[0] == 1)
{
THBlas_(ger)(vec1->size[0], vec2->size[0],
alpha, THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(r_), r_->stride[1]);
}
else if(r_->stride[1] == 1)
{
THBlas_(ger)(vec2->size[0], vec1->size[0],
alpha, THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(r_), r_->stride[0]);
}
else
{
THTensor *cr = THTensor_(newClone)(r_);
THBlas_(ger)(vec2->size[0], vec1->size[0],
alpha, THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(cr), cr->stride[0]);
THTensor_(freeCopyTo)(cr, r_);
}
}
/* result = beta * t + alpha * sum_b batch1[b] * batch2[b]: batched matrix
   multiply REDUCED over the batch dimension into a single 2-D result.
   batch1 is (B, dim1, K), batch2 is (B, K, dim2), t/result are
   (dim1, dim2). After the first addmm, beta is forced to 1 so subsequent
   batches accumulate rather than rescale. */
void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
long batch;
THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor");
THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor");
THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
"equal number of batches expected, got %d, %d",
THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
"wrong matrix size, batch1: %dx%d, batch2: %dx%d",
THTensor_(size)(batch1, 1), THTensor_(size)(batch1,2),
THTensor_(size)(batch2, 1), THTensor_(size)(batch2,2));
long dim1 = THTensor_(size)(batch1, 1);
long dim2 = THTensor_(size)(batch2, 2);
THArgCheck(THTensor_(size)(t, 0) == dim1, 1, "output tensor of incorrect size");
THArgCheck(THTensor_(size)(t, 1) == dim2, 1, "output tensor of incorrect size");
if (t != result) {
THTensor_(resizeAs)(result, t);
if (beta != 0.0) {
THTensor_(copy)(result, t);
}
}
THTensor *matrix1 = THTensor_(new)();
THTensor *matrix2 = THTensor_(new)();
for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
THTensor_(select)(matrix1, batch1, 0, batch);
THTensor_(select)(matrix2, batch2, 0, batch);
THTensor_(addmm)(result, beta, result, alpha, matrix1, matrix2);
beta = 1; // accumulate output once
}
THTensor_(free)(matrix1);
THTensor_(free)(matrix2);
}
/* result[b] = beta * t[b] + alpha * batch1[b] * batch2[b]: batched matrix
   multiply with a PER-BATCH output (no reduction, unlike addbmm).
   batch1 is (B, dim1, K), batch2 is (B, K, dim2), t/result are
   (B, dim1, dim2). Each batch slice is handled by one addmm call on
   re-selected slice views. */
void THTensor_(baddbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
long batch;
THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch1));
THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch2));
THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
"equal number of batches expected, got %d, %d",
THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
"wrong matrix size, batch1: %dx%d, batch2: %dx%d",
THTensor_(size)(batch1, 1), THTensor_(size)(batch1, 2),
THTensor_(size)(batch2, 1), THTensor_(size)(batch2, 2));
long bs = THTensor_(size)(batch1, 0);
long dim1 = THTensor_(size)(batch1, 1);
long dim2 = THTensor_(size)(batch2, 2);
THArgCheck(THTensor_(size)(t, 0) == bs, 1, "output tensor of incorrect size");
THArgCheck(THTensor_(size)(t, 1) == dim1, 1, "output tensor of incorrect size");
THArgCheck(THTensor_(size)(t, 2) == dim2, 1, "output tensor of incorrect size");
if (t != result) {
THTensor_(resizeAs)(result, t);
if (beta != 0.0) {
THTensor_(copy)(result, t);
}
}
THTensor *matrix1 = THTensor_(new)();
THTensor *matrix2 = THTensor_(new)();
THTensor *result_matrix = THTensor_(new)();
for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
THTensor_(select)(matrix1, batch1, 0, batch);
THTensor_(select)(matrix2, batch2, 0, batch);
THTensor_(select)(result_matrix, result, 0, batch);
THTensor_(addmm)(result_matrix, beta, result_matrix, alpha, matrix1, matrix2);
}
THTensor_(free)(matrix1);
THTensor_(free)(matrix2);
THTensor_(free)(result_matrix);
}
/* Total number of elements — a thin public alias for nElement. */
ptrdiff_t THTensor_(numel)(THTensor *t)
{
  return THTensor_(nElement)(t);
}
/* Reduce t along `dimension`, writing the maximum of each slice into values_
   and the (0-based) position of that maximum into indices_.  The reduced
   dimension is kept with size 1, then squeezed away when keepdim is 0. */
void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  /* Output shape = input shape with the reduced dimension set to 1. */
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  // two implementations optimized for data locality
  if (t->stride[dimension] == 1) {
    /* Unit stride along the reduced dimension: scan each slice linearly. */
    real theMax;
    real value;
    long theIndex;
    long i;
    TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
                         theMax = t_data[0];
                         theIndex = 0;

                         for(i = 0; i < t_size; i++)
                         {
                           value = t_data[i*t_stride];
                           /* This is not the same as value>theMax in the case of NaNs */
                           if(!(value <= theMax))
                           {
                             theIndex = i;
                             theMax = value;
                             th_isnan_break(value)
                           }
                         }
                         *indices__data = theIndex;
                         *values__data = theMax;);
  } else {
    /* Strided case: seed values_ with slice 0, then sweep the whole tensor
       against zero-stride "expanded" views of the outputs. */
    if (THTensor_(nDimension)(t) > 1) {
      THTensor *t0 = THTensor_(newSelect)(t, dimension, 0);
      THTensor_(copy)(values_, t0);
      THTensor_(free)(t0);
    } else {
      THTensor_(fill)(values_, THTensor_(get1d)(t, 0));
    }
    THLongTensor_zero(indices_);

    if(t->size[dimension] == 1) {
      /* Only one slice: the seed is already the answer. */
      if (!keepdim) {
        THTensor_(squeeze1d)(values_, values_, dimension);
        THLongTensor_squeeze1d(indices_, indices_, dimension);
      }
      return;
    }

    THTensor *tempValues_ = THTensor_(newWithTensor)(values_);
    // tempValues_.expand_as(t)
    tempValues_->size[dimension] = t->size[dimension];
    tempValues_->stride[dimension] = 0;

    THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_);
    // tempIndices_.expand_as(t)
    tempIndices_->size[dimension] = t->size[dimension];
    tempIndices_->stride[dimension] = 0;

    TH_TENSOR_APPLY3_D(real, t, real, tempValues_, long, tempIndices_, dimension,
                       if(!(*t_data <= *tempValues__data) && !th_isnan(*tempValues__data)) {
                         *tempValues__data = *t_data;
                         *tempIndices__data = *tempIndices__dimOffset;
                       });

    THTensor_(free)(tempValues_);
    THLongTensor_free(tempIndices_);
  }

  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* Reduce t along `dimension`, writing the minimum of each slice into values_
   and the (0-based) position of that minimum into indices_.  Mirrors
   THTensor_(max).  Fixes: the temporary expanded views in the strided path
   were never freed (a leak — compare max, which frees both), and the local
   holding the running minimum was confusingly named `theMax`. */
void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  /* Output shape = input shape with the reduced dimension set to 1. */
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  // two implementations optimized for data locality
  if (t->stride[dimension] == 1) {
    /* Unit stride along the reduced dimension: scan each slice linearly. */
    real theMin;
    real value;
    long theIndex;
    long i;
    TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
                         theMin = t_data[0];
                         theIndex = 0;

                         for(i = 0; i < t_size; i++)
                         {
                           value = t_data[i*t_stride];
                           /* This is not the same as value<theMin in the case of NaNs */
                           if(!(value >= theMin))
                           {
                             theIndex = i;
                             theMin = value;
                             th_isnan_break(value)
                           }
                         }
                         *indices__data = theIndex;
                         *values__data = theMin;);
  } else {
    /* Strided case: seed values_ with slice 0, then sweep the whole tensor
       against zero-stride "expanded" views of the outputs. */
    if (THTensor_(nDimension)(t) > 1) {
      THTensor *t0 = THTensor_(newSelect)(t, dimension, 0);
      THTensor_(copy)(values_, t0);
      THTensor_(free)(t0);
    } else {
      THTensor_(fill)(values_, THTensor_(get1d)(t, 0));
    }
    THLongTensor_zero(indices_);

    if(t->size[dimension] == 1) {
      if (!keepdim) {
        THTensor_(squeeze1d)(values_, values_, dimension);
        THLongTensor_squeeze1d(indices_, indices_, dimension);
      }
      return;
    }

    THTensor *tempValues_ = THTensor_(newWithTensor)(values_);
    // tempValues_.expand_as(t)
    tempValues_->size[dimension] = t->size[dimension];
    tempValues_->stride[dimension] = 0;

    THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_);
    // tempIndices_.expand_as(t)
    tempIndices_->size[dimension] = t->size[dimension];
    tempIndices_->stride[dimension] = 0;

    TH_TENSOR_APPLY3_D(real, t, real, tempValues_, long, tempIndices_, dimension,
                       if(!(*t_data >= *tempValues__data) && !th_isnan(*tempValues__data)) {
                         *tempValues__data = *t_data;
                         *tempIndices__data = *tempIndices__dimOffset;
                       });

    /* These views were previously leaked; free them as max() does. */
    THTensor_(free)(tempValues_);
    THLongTensor_free(tempIndices_);
  }

  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* r_ = sum of t along `dimension` (accumulated in accreal, stored as real).
   The reduced dimension is kept with size 1, squeezed when keepdim is 0. */
void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  // two implementations optimized for data locality
  if (t->stride[dimension] == 1) {
    /* Unit stride: accumulate each slice with a simple linear scan. */
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal sum = 0;
                         long i;
                         for(i = 0; i < t_size; i++)
                           sum += t_data[i*t_stride];
                         *r__data = (real)sum;);
  } else {
    /* Strided: accumulate into a zero-stride expanded view of r_. */
    THTensor_(zero)(r_);
    THTensor *temp_ = THTensor_(newWithTensor)(r_);
    // r_.expand_as(t)
    temp_->size[dimension] = t->size[dimension];
    temp_->stride[dimension] = 0;

    TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data + *t_data;);
    THTensor_(free)(temp_);
  }

  if (!keepdim) {
    THTensor_(squeeze1d)(r_, r_, dimension);
  }
}
/* r_ = product of t along `dimension` (accumulated in accreal).
   Same structure as THTensor_(sum) with * instead of + and identity 1. */
void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  // two implementations optimized for data locality
  if (t->stride[dimension] == 1) {
    /* Unit stride: multiply each slice with a simple linear scan. */
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal prod = 1;
                         long i;
                         for(i = 0; i < t_size; i++)
                           prod *= t_data[i*t_stride];
                         *r__data = (real)prod;);
  } else {
    /* Strided: multiply into a zero-stride expanded view of r_. */
    THTensor_(fill)(r_, 1);
    THTensor *temp_ = THTensor_(newWithTensor)(r_);
    // r_.expand_as(t)
    temp_->size[dimension] = t->size[dimension];
    temp_->stride[dimension] = 0;

    TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data * *t_data;);
    THTensor_(free)(temp_);
  }

  if (!keepdim) {
    THTensor_(squeeze1d)(r_, r_, dimension);
  }
}
/* r_ = cumulative sum of t along `dimension`; same shape as t.
   Each output element is the sum of all input elements up to and
   including that position within its slice. */
void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(r_, t);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal cumsum = 0;
                       long i;
                       for(i = 0; i < t_size; i++)
                       {
                         cumsum += t_data[i*t_stride];
                         r__data[i*r__stride] = (real)cumsum;
                       });
}
/* r_ = cumulative product of t along `dimension`; same shape as t. */
void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range",
      dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(r_, t);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal cumprod = 1;
                       long i;
                       for(i = 0; i < t_size; i++)
                       {
                         cumprod *= t_data[i*t_stride];
                         r__data[i*r__stride] = (real)cumprod;
                       });
}
/* r_ = elementwise sign of t: +1 for positive, -1 for negative, 0 otherwise.
   For byte tensors (unsigned) negative values cannot occur, so the result
   is just the 0/1 positivity mask. */
void THTensor_(sign)(THTensor *r_, THTensor *t)
{
  THTensor_(resizeAs)(r_, t);

#if defined (TH_REAL_IS_BYTE)
  TH_TENSOR_APPLY2(real, r_, real, t,
    *r__data = (*t_data > 0) ? 1 : 0;);
#else
  TH_TENSOR_APPLY2(real, r_, real, t,
    *r__data = (*t_data > 0) ? 1 : ((*t_data < 0) ? -1 : 0););
#endif
}
/* Sum of the main-diagonal entries of a 2D tensor, accumulated in accreal. */
accreal THTensor_(trace)(THTensor *t)
{
  real *t_data = THTensor_(data)(t);
  accreal sum = 0;
  long i;
  long t_stride_0, t_stride_1, t_diag_size;

  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  t_stride_0 = THTensor_(stride)(t, 0);
  t_stride_1 = THTensor_(stride)(t, 1);
  /* A (possibly non-square) matrix has min(rows, cols) diagonal entries. */
  t_diag_size = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1));

  /* Step by the combined row+column stride to walk the diagonal. */
  for (i = 0; i < t_diag_size; i++)
    sum += t_data[i*(t_stride_0+t_stride_1)];

  return sum;
}
/* r_ = 3D vector cross product of a and b computed along `dimension`.
   If dimension < 0, the first dimension of size 3 is used.  a and b must
   have identical shapes and the chosen dimension must have size 3. */
void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension)
{
  int i;

  if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b))
    THError("inconsistent tensor dimension %dD, %dD",
        THTensor_(nDimension)(a), THTensor_(nDimension)(b));

  for(i = 0; i < THTensor_(nDimension)(a); i++)
  {
    if(THTensor_(size)(a, i) != THTensor_(size)(b, i)) {
      THDescBuff ba = THTensor_(sizeDesc)(a);
      THDescBuff bb = THTensor_(sizeDesc)(b);
      THError("inconsistent tensor sizes %s, %s", ba.str, bb.str);
    }
  }

  /* Negative dimension: auto-detect the first dimension of size 3. */
  if(dimension < 0)
  {
    for(i = 0; i < THTensor_(nDimension)(a); i++)
    {
      if(THTensor_(size)(a, i) == 3)
      {
        dimension = i;
        break;
      }
    }
    if(dimension < 0) {
      THDescBuff ba = THTensor_(sizeDesc)(a);
      THError("no dimension of size 3 in a: %s", ba.str);
    }
  }

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3, "dimension %d out of range",
      dimension + TH_INDEX_BASE);
  THArgCheck(THTensor_(size)(a, dimension) == 3, 3, "dimension %d does not have size 3",
      dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(r_, a);

  /* Standard cross-product formula applied per 3-element slice. */
  TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension,
                       r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride];
                       r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride];
                       r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];);
}
/* r = elementwise maximum of t and src (src wins on ties, as before). */
void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   if (*t_data > *src_data)
                     *r_data = *t_data;
                   else
                     *r_data = *src_data;);
}
/* r = elementwise minimum of t and src (src wins on ties, as before). */
void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   if (*t_data < *src_data)
                     *r_data = *t_data;
                   else
                     *r_data = *src_data;);
}
/* r = elementwise maximum of t and the scalar `value` (clamp from below). */
void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   if (*t_data > value)
                     *r_data = *t_data;
                   else
                     *r_data = value;);
}
/* r = elementwise minimum of t and the scalar `value` (clamp from above). */
void THTensor_(cminValue)(THTensor *r, THTensor *t, real value) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   if (*t_data < value)
                     *r_data = *t_data;
                   else
                     *r_data = value;);
}
/* Resize r_ to `size` and fill it with zeros. */
void THTensor_(zeros)(THTensor *r_, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(zero)(r_);
}
/* Resize r_ to the same shape as `input` and fill it with zeros. */
void THTensor_(zerosLike)(THTensor *r_, THTensor *input)
{
  THTensor_(resizeAs)(r_, input);
  THTensor_(zero)(r_);
}
/* Resize r_ to the same shape as `input` and fill it with ones. */
void THTensor_(onesLike)(THTensor *r_, THTensor *input)
{
  THTensor_(resizeAs)(r_, input);
  THTensor_(fill)(r_, 1);
}
/* Resize r_ to `size` and fill it with ones. */
void THTensor_(ones)(THTensor *r_, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(fill)(r_, 1);
}
/* Two-way diagonal operation, offset by k (k>0: above main diagonal,
   k<0: below).  Vector input -> square matrix with the vector on the k-th
   diagonal; matrix input -> vector holding its k-th diagonal. */
void THTensor_(diag)(THTensor *r_, THTensor *t, int k)
{
  THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected");

  if(THTensor_(nDimension)(t) == 1)
  {
    /* Vector -> matrix: zero-filled (size+|k|)^2 with t on the k-th diagonal. */
    real *t_data = THTensor_(data)(t);
    long t_stride_0 = THTensor_(stride)(t, 0);
    long t_size = THTensor_(size)(t, 0);
    long sz = t_size + (k >= 0 ? k : -k);
    real *r__data;
    long r__stride_0;
    long r__stride_1;
    long i;

    THTensor_(resize2d)(r_, sz, sz);
    THTensor_(zero)(r_);
    r__data = THTensor_(data)(r_);
    r__stride_0 = THTensor_(stride)(r_, 0);
    r__stride_1 = THTensor_(stride)(r_, 1);
    /* Shift the base pointer to the start of the k-th diagonal. */
    r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0);

    for(i = 0; i < t_size; i++)
      r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0];
  }
  else
  {
    /* Matrix -> vector: extract the k-th diagonal. */
    real *t_data = THTensor_(data)(t);
    long t_stride_0 = THTensor_(stride)(t, 0);
    long t_stride_1 = THTensor_(stride)(t, 1);
    long sz;
    real *r__data;
    long r__stride_0;
    long i;

    /* Diagonal length shrinks by |k| against the relevant matrix edge. */
    if(k >= 0)
      sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k);
    else
      sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1));
    THTensor_(resize1d)(r_, sz);
    r__data = THTensor_(data)(r_);
    r__stride_0 = THTensor_(stride)(r_, 0);

    /* Shift the source pointer to the start of the k-th diagonal. */
    t_data += (k >= 0 ? k*t_stride_1 : -k*t_stride_0);
    for(i = 0; i < sz; i++)
      r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)];
  }
}
/* Resize r_ to an n x m matrix (m defaults to n when m <= 0) with ones on
   the main diagonal and zeros elsewhere.  Fixes: removed the dead store
   `i = 0` (the for-loop re-initializes i) and hoisted the loop-invariant
   stride sum out of the loop. */
void THTensor_(eye)(THTensor *r_, long n, long m)
{
  real *r__data;
  long i, sz;
  long diag_stride;

  THArgCheck(n > 0, 1, "invalid argument");

  if(m <= 0)
    m = n;

  THTensor_(resize2d)(r_, n, m);
  THTensor_(zero)(r_);

  r__data = THTensor_(data)(r_);
  /* The diagonal has min(rows, cols) entries. */
  sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1));
  diag_stride = r_->stride[0] + r_->stride[1];
  for(i = 0; i < sz; i++)
    r__data[i*diag_stride] = 1;
}
/* Fill r_ with the closed arithmetic sequence xmin, xmin+step, ..., up to
   and including xmax (step may be negative for a descending sequence). */
void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step)
{
  ptrdiff_t size;
  real i = 0;  /* running multiplier; real-typed so the fill expression stays in real */

  THArgCheck(step > 0 || step < 0, 3, "step must be a non-null number");
  THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
              , 2, "upper bound and larger bound incoherent with step sign");

  /* Number of points in the closed interval, truncated toward zero. */
  size = (ptrdiff_t) (((xmax - xmin) / step) + 1);

  if (THTensor_(nElement)(r_) != size) {
    THTensor_(resize1d)(r_, size);
  }

  /* NOTE(review): for floating real, i*step accumulates rounding error for
     very long ranges; kept as-is to preserve existing numerics. */
  TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;);
}
/* Half-open variant of range(): when (xmax - xmin) is an exact multiple of
   step, the endpoint is excluded by pulling xmax back one step before
   delegating to range(). */
void THTensor_(arange)(THTensor *r_, accreal xmin, accreal xmax, accreal step) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
  int m = fmod(xmax - xmin,step) == 0;
#else
  int m = (xmax - xmin) % step == 0;
#endif
  if (m)
    xmax -= step;
  THTensor_(range)(r_,xmin,xmax,step);
}
/* Fill r_ with a uniformly random permutation of {0, ..., n-1}, using a
   Fisher-Yates shuffle driven by _generator. */
void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, long n)
{
  real *data;
  long stride;
  long i;

  THArgCheck(n > 0, 1, "must be strictly positive");

  THTensor_(resize1d)(r_, n);
  data = THTensor_(data)(r_);
  stride = THTensor_(stride)(r_, 0);

  /* Start from the identity permutation... */
  for (i = 0; i < n; i++)
    data[i*stride] = (real)(i);

  /* ...then swap each position with a uniformly chosen position at or
     after it. */
  for (i = 0; i < n-1; i++)
  {
    long j = i + THRandom_random(_generator) % (n-i);
    real tmp = data[i*stride];
    data[i*stride] = data[j*stride];
    data[j*stride] = tmp;
  }
}
/* Resize r_ to `size` and copy t's elements into it.  NOTE(review): no
   element-count check is performed here; the copy defines the behavior
   when counts differ — confirm callers guarantee matching counts. */
void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(copy)(r_, t);
}
/* I cut and pasted (slightly adapted) the quicksort code from
Sedgewick's 1978 "Implementing Quicksort Programs" article
http://www.csie.ntu.edu.tw/~b93076/p847-sedgewick.pdf
It is the state of the art existing implementation. The macros
are here to make as close a match as possible to the pseudocode of
Program 2 p.851
Note that other partition schemes exist, and are typically presented
in textbooks, but those are less efficient. See e.g.
http://cs.stackexchange.com/questions/11458/quicksort-partitioning-hoare-vs-lomuto
Julien, November 12th 2013
*/
/* Depth of the explicit subfile stack used by the iterative quicksorts. */
#define MAX_LEVELS 300
#define M_SMALL 10 /* Limit for small subfiles */

/* Strided access into the value (arr) and index (idx) arrays being sorted;
   `arr`, `idx` and `stride` are parameters of the enclosing sort routine. */
#define ARR(III) arr[(III)*stride]
#define IDX(III) idx[(III)*stride]

/* Swap helpers; `swap` and `rswap` are locals declared by each routine.
   NOTE(review): multi-statement without do/while(0) — only safe because
   every use site wraps them in braces. */
#define LONG_SWAP(AAA, BBB) swap = AAA; AAA = BBB; BBB = swap
#define REAL_SWAP(AAA, BBB) rswap = AAA; AAA = BBB; BBB = rswap
#define ARR_SWAP(III, JJJ) \
  REAL_SWAP(ARR(III), ARR(JJJ));
#define BOTH_SWAP(III, JJJ) \
  REAL_SWAP(ARR(III), ARR(JJJ)); \
  LONG_SWAP(IDX(III), IDX(JJJ))
/* In-place ascending quicksort of `elements` strided values in arr, moving
   idx entries in lockstep.  Iterative Sedgewick scheme: median-of-three
   pivots, explicit stack of deferred subfiles, subfiles of <= M_SMALL
   elements left unsorted and finished by a single insertion-sort pass. */
static void THTensor_(quicksortascend)(real *arr, long *idx, long elements, long stride)
{
  long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
  real rswap, piv;
  unsigned char done = 0;

  /* beg[0]=0; end[0]=elements; */
  stack = 0;
  L = 0; R = elements-1;
  done = elements-1 <= M_SMALL;

  while(!done) {
    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    /* Order L, L+1, R so that ARR(L) is the median and acts as pivot. */
    if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }

    i = L+1; j = R; piv = ARR(L); pid = IDX(L);

    /* Hoare-style partition around piv. */
    do {
      do { i = i+1; } while(ARR(i) < piv);
      do { j = j-1; } while(ARR(j) > piv);
      if (j < i)
          break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);

    /* Left subfile is (L, j-1) */
    /* Right subfile is (i, R) */
    sz_left = j-L;
    sz_right = R-i+1;
    if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
      /* both subfiles are small */
      /* if stack empty */
      if (stack == 0) {
        done = 1;
      } else {
        stack--;
        L = beg[stack];
        R = end[stack];
      }
    } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
      /* exactly one of the subfiles is small */
      /* (L,R) = large subfile */
      if (sz_left > sz_right) {
        /* Implicit: L = L; */
        R = j-1;
      } else {
        L = i;
        /* Implicit: R = R; */
      }
    } else {
      /* none of the subfiles is small */
      /* push large subfile */
      /* (L,R) = small subfile */
      if (sz_left > sz_right) {
        beg[stack] = L;
        end[stack] = j-1;
        stack++;
        L = i;
        /* Implicit: R = R */
      } else {
        beg[stack] = i;
        end[stack] = R;
        stack++;
        /* Implicit: L = L; */
        R = j-1;
      }
    }
  } /* while not done */

  /* Now insertion sort on the concatenation of subfiles */
  for(i=elements-2; i>=0; i--) {
    if (ARR(i) > ARR(i+1)) {
      piv = ARR(i);
      pid = IDX(i);
      j = i+1;
      do {
        ARR(j-1) = ARR(j);
        IDX(j-1) = IDX(j);
        j = j+1;
      } while(j < elements && ARR(j) < piv);
      ARR(j-1) = piv;
      IDX(j-1) = pid;
    }
  }
}
/* Descending twin of quicksortascend: identical structure with every value
   comparison reversed.  Sorts `elements` strided values in arr in-place,
   moving idx entries in lockstep. */
static void THTensor_(quicksortdescend)(real *arr, long *idx, long elements, long stride)
{
  long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
  real rswap, piv;
  unsigned char done = 0;

  /* beg[0]=0; end[0]=elements; */
  stack = 0;
  L = 0; R = elements-1;
  done = elements-1 <= M_SMALL;

  while(!done) {
    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    /* Order L, L+1, R (reversed sense) so that ARR(L) is the median pivot. */
    if (ARR(L+1) < ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) < ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) < ARR(L)) { BOTH_SWAP(L+1, L); }

    i = L+1; j = R; piv = ARR(L); pid = IDX(L);

    /* Hoare-style partition around piv (reversed comparisons). */
    do {
      do { i = i+1; } while(ARR(i) > piv);
      do { j = j-1; } while(ARR(j) < piv);
      if (j < i)
          break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);

    /* Left subfile is (L, j-1) */
    /* Right subfile is (i, R) */
    sz_left = j-L;
    sz_right = R-i+1;
    if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
      /* both subfiles are small */
      /* if stack empty */
      if (stack == 0) {
        done = 1;
      } else {
        stack--;
        L = beg[stack];
        R = end[stack];
      }
    } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
      /* exactly one of the subfiles is small */
      /* (L,R) = large subfile */
      if (sz_left > sz_right) {
        /* Implicit: L = L; */
        R = j-1;
      } else {
        L = i;
        /* Implicit: R = R; */
      }
    } else {
      /* none of the subfiles is small */
      /* push large subfile */
      /* (L,R) = small subfile */
      if (sz_left > sz_right) {
        beg[stack] = L;
        end[stack] = j-1;
        stack++;
        L = i;
        /* Implicit: R = R */
      } else {
        beg[stack] = i;
        end[stack] = R;
        stack++;
        /* Implicit: L = L; */
        R = j-1;
      }
    }
  } /* while not done */

  /* Now insertion sort on the concatenation of subfiles */
  for(i=elements-2; i>=0; i--) {
    if (ARR(i) < ARR(i+1)) {
      piv = ARR(i);
      pid = IDX(i);
      j = i+1;
      do {
        ARR(j-1) = ARR(j);
        IDX(j-1) = IDX(j);
        j = j+1;
      } while(j < elements && ARR(j) > piv);
      ARR(j-1) = piv;
      IDX(j-1) = pid;
    }
  }
}
#undef MAX_LEVELS
#undef M_SMALL
/* Sort t along `dimension` into rt_ (values) and ri_ (original positions),
   ascending by default or descending when descendingOrder is nonzero. */
void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  /* Sort a copy of t in place; ri_ gets matching shape for the indices. */
  THTensor_(resizeAs)(rt_, t);
  THTensor_(copy)(rt_, t);

  {
    THLongStorage *size = THTensor_(newSizeOf)(t);
    THLongTensor_resize(ri_, size, NULL);
    THLongStorage_free(size);
  }

  if(descendingOrder)
  {
    TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
                         long i;
                         /* Seed indices with 0..size-1, then co-sort them. */
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);)
  }
  else
  {
    TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
                         long i;
                         /* Seed indices with 0..size-1, then co-sort them. */
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);)
  }
}
/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's
public domain implementation at http://ndevilla.free.fr/median/median/
Adapted similarly to the above Quicksort algorithm.
This version does not produce indices along with values. */
/* Partially order arr (strided, `elements` long) so that the k-th smallest
   value ends up at position k; no index array is maintained.  Iterative
   quickselect with median-of-three pivoting. */
static void THTensor_(quickselectnoidx)(real *arr, long k, long elements, long stride)
{
  long P, L, R, i, j, swap;
  real rswap, piv;
  L = 0;
  R = elements-1;

  do {
    if (R <= L) /* One element only */
      return;

    if (R == L+1) {  /* Two elements only */
      if (ARR(L) > ARR(R)) {
        ARR_SWAP(L, R);
      }
      return;
    }

    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    ARR_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { ARR_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { ARR_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { ARR_SWAP(L+1, L); }

    i = L+1;
    j = R;
    piv = ARR(L);
    /* Hoare-style partition around piv. */
    do {
      do i++; while(ARR(i) < piv);
      do j--; while(ARR(j) > piv);
      if (j < i)
          break;
      ARR_SWAP(i, j);
    } while(1);
    ARR_SWAP(L, j);

    /* Re-set active partition: recurse (iteratively) only into the side
       that still contains position k. */
    if (j <= k) L=i;
    if (j >= k) R=j-1;
  } while(1);
}
/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's
public domain implementation at http://ndevilla.free.fr/median/median/
Adapted similarly to the above Quicksort algorithm. */
/* Like quickselectnoidx, but moves the companion index array idx in
   lockstep, so idx[k] ends up holding the original position of the k-th
   smallest value. */
static void THTensor_(quickselect)(real *arr, long *idx, long k, long elements, long stride)
{
  long P, L, R, i, j, swap, pid;
  real rswap, piv;
  L = 0;
  R = elements-1;

  do {
    if (R <= L) /* One element only */
      return;

    if (R == L+1) {  /* Two elements only */
      if (ARR(L) > ARR(R)) {
        BOTH_SWAP(L, R);
      }
      return;
    }

    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }

    i = L+1;
    j = R;
    piv = ARR(L);
    pid = IDX(L);
    /* Hoare-style partition around piv. */
    do {
      do i++; while(ARR(i) < piv);
      do j--; while(ARR(j) > piv);
      if (j < i)
          break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);

    /* Re-set active partition: keep only the side containing position k. */
    if (j <= k) L=i;
    if (j >= k) R=j-1;
  } while(1);
}
#undef ARR
#undef IDX
#undef LONG_SWAP
#undef REAL_SWAP
#undef BOTH_SWAP
/* Mode along `dimension`: for each slice, the most frequently occurring
   value (values_) and one original position of it (indices_).  Each slice
   is copied into a scratch buffer, co-sorted with its indices, and scanned
   for the longest run of equal values. */
void THTensor_(mode)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;
  THTensor *temp_;
  THLongTensor *tempi_;
  real *temp__data;
  long *tempi__data;
  long t_size_dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");

  /* Output shape = input shape with the reduced dimension set to 1. */
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  t_size_dim = THTensor_(size)(t, dimension);

  /* Scratch value/index buffers reused for every slice. */
  temp_ = THTensor_(new)();
  THTensor_(resize1d)(temp_, t_size_dim);
  temp__data = THTensor_(data)(temp_);

  tempi_ = THLongTensor_new();
  THLongTensor_resize1d(tempi_, t_size_dim);
  tempi__data = THLongTensor_data(tempi_);

  TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
                       long i;
                       real mode = 0;
                       long modei = 0;
                       long temp_freq = 0;
                       long max_freq = 0;
                       for(i = 0; i < t_size_dim; i++)
                          temp__data[i] = t_data[i*t_stride];
                       for(i = 0; i < t_size_dim; i++)
                          tempi__data[i] = i;
                       THTensor_(quicksortascend)(temp__data, tempi__data, t_size_dim, 1);
                       /* Sorted run-length scan: track the longest run. */
                       for(i = 0; i < t_size_dim; i++)
                       {
                          temp_freq++;
                          if ((i == t_size_dim - 1) || (temp__data[i] != temp__data[i+1]))
                          {
                             if (temp_freq > max_freq)
                             {
                                mode = temp__data[i];
                                modei = tempi__data[i];
                                max_freq = temp_freq;
                             }
                             temp_freq = 0;
                          }
                       }
                       *values__data = mode;
                       *indices__data = modei;);

  THTensor_(free)(temp_);
  THLongTensor_free(tempi_);
  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* k-th smallest value (1-based k) along `dimension`, with its original
   position.  Each slice is copied into a scratch buffer and quickselect
   places the answer at offset k-1. */
void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, long k, int dimension, int keepdim)
{
  THLongStorage *dim;
  THTensor *temp_;
  THLongTensor *tempi_;
  real *temp__data;
  long *tempi__data;
  long t_size_dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");
  THArgCheck(k > 0 && k <= t->size[dimension], 2, "selected index out of range");

  /* Output shape = input shape with the reduced dimension set to 1. */
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  t_size_dim = THTensor_(size)(t, dimension);

  /* Scratch value/index buffers reused for every slice. */
  temp_ = THTensor_(new)();
  THTensor_(resize1d)(temp_, t_size_dim);
  temp__data = THTensor_(data)(temp_);

  tempi_ = THLongTensor_new();
  THLongTensor_resize1d(tempi_, t_size_dim);
  tempi__data = THLongTensor_data(tempi_);

  TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
                       long i;
                       for(i = 0; i < t_size_dim; i++)
                          temp__data[i] = t_data[i*t_stride];
                       for(i = 0; i < t_size_dim; i++)
                          tempi__data[i] = i;
                       /* quickselect is 0-based, hence k - 1. */
                       THTensor_(quickselect)(temp__data, tempi__data, k - 1, t_size_dim, 1);
                       *values__data = temp__data[k-1];
                       *indices__data = tempi__data[k-1];);

  THTensor_(free)(temp_);
  THLongTensor_free(tempi_);
  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}
/* Median along `dimension`: the lower median for even-sized slices,
   obtained by delegating to kthvalue with k = floor((size-1)/2) + 1. */
void THTensor_(median)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  long t_size_dim, k;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range");

  t_size_dim = THTensor_(size)(t, dimension);
  /* Middle (odd size) or one-before-middle (even size), 0-based. */
  k = (t_size_dim - 1) / 2;
  THTensor_(kthvalue)(values_, indices_, t, k + 1, dimension, keepdim);
}
/* Top-k along `dim`: the k largest (dir nonzero) or k smallest (dir zero)
   elements of each slice, with their original positions, optionally sorted.
   Implemented as quickselect to partition, then quicksort of just the k
   selected entries when `sorted` is requested. */
void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t, long k, int dim, int dir, int sorted)
{
  int numDims = THTensor_(nDimension)(t);
  THArgCheck(dim >= 0 && dim < numDims, 3, "dim not in range");

  long sliceSize = THTensor_(size)(t, dim);
  THArgCheck(k > 0 && k <= sliceSize, 2, "k not in range for dimension");

  /* Scratch value/index buffers reused for every slice. */
  THTensor *tmpResults = THTensor_(new)();
  THTensor_(resize1d)(tmpResults, sliceSize);
  real *tmp__data = THTensor_(data)(tmpResults);

  THLongTensor *tmpIndices = THLongTensor_new();
  THLongTensor_resize1d(tmpIndices, sliceSize);
  long *tmpi__data = THLongTensor_data(tmpIndices);

  /* Outputs have the slice dimension shrunk to k. */
  THLongStorage *topKSize = THTensor_(newSizeOf)(t);
  THLongStorage_set(topKSize, dim, k);
  THTensor_(resize)(rt_, topKSize, NULL);
  THLongTensor_resize(ri_, topKSize, NULL);
  THLongStorage_free(topKSize);

  if (dir) {
    /* k largest elements, descending order (optional: see sorted) */
    /* Partition so the k largest end up in the last k positions. */
    long K = sliceSize - k;
    TH_TENSOR_DIM_APPLY3(real, t, real, rt_, long, ri_, dim,
                         long i;
                         for(i = 0; i < sliceSize; i++)
                         {
                           tmp__data[i] = t_data[i*t_stride];
                           tmpi__data[i] = i;
                         }
                         if (K > 0)
                           THTensor_(quickselect)(tmp__data, tmpi__data, K - 1, sliceSize, 1);
                         if (sorted)
                           THTensor_(quicksortdescend)(tmp__data + K, tmpi__data + K, k, 1);
                         for(i = 0; i < k; i++)
                         {
                           rt__data[i*rt__stride] = tmp__data[i + K];
                           ri__data[i*ri__stride] = tmpi__data[i + K];
                         })
  }
  else {
    /* k smallest elements, ascending order (optional: see sorted) */
    TH_TENSOR_DIM_APPLY3(real, t, real, rt_, long, ri_, dim,
                         long i;
                         for(i = 0; i < sliceSize; i++)
                         {
                           tmp__data[i] = t_data[i*t_stride];
                           tmpi__data[i] = i;
                         }
                         THTensor_(quickselect)(tmp__data, tmpi__data, k - 1, sliceSize, 1);
                         /* Position k-1 already holds the largest of the k,
                            so only the first k-1 entries need sorting. */
                         if (sorted)
                           THTensor_(quicksortascend)(tmp__data, tmpi__data, k - 1, 1);
                         for(i = 0; i < k; i++)
                         {
                           rt__data[i*rt__stride] = tmp__data[i];
                           ri__data[i*ri__stride] = tmpi__data[i];
                         })
  }

  THTensor_(free)(tmpResults);
  THLongTensor_free(tmpIndices);
}
/* r_ = lower triangle of 2D tensor t: elements on or below the k-th
   diagonal are copied, everything above it is zeroed. */
void THTensor_(tril)(THTensor *r_, THTensor *t, long k)
{
  long t_size_0, t_size_1;
  long t_stride_0, t_stride_1;
  long r__stride_0, r__stride_1;
  real *t_data, *r__data;
  long r, c;

  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  THTensor_(resizeAs)(r_, t);

  t_size_0 = THTensor_(size)(t, 0);
  t_size_1 = THTensor_(size)(t, 1);
  t_stride_0 = THTensor_(stride)(t, 0);
  t_stride_1 = THTensor_(stride)(t, 1);
  r__stride_0 = THTensor_(stride)(r_, 0);
  r__stride_1 = THTensor_(stride)(r_, 1);
  r__data = THTensor_(data)(r_);
  t_data = THTensor_(data)(t);

  for(r = 0; r < t_size_0; r++)
  {
    /* Columns [0, sz) are kept; columns [max(0, r+k+1), size) are zeroed. */
    long sz = THMin(r+k+1, t_size_1);
    for(c = THMax(0, r+k+1); c < t_size_1; c++)
      r__data[r*r__stride_0+c*r__stride_1] = 0;
    for(c = 0; c < sz; c++)
      r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
  }
}
/* r_ = upper triangle of 2D tensor t: elements on or above the k-th
   diagonal are copied, everything below it is zeroed. */
void THTensor_(triu)(THTensor *r_, THTensor *t, long k)
{
  long t_size_0, t_size_1;
  long t_stride_0, t_stride_1;
  long r__stride_0, r__stride_1;
  real *t_data, *r__data;
  long r, c;

  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  THTensor_(resizeAs)(r_, t);

  t_size_0 = THTensor_(size)(t, 0);
  t_size_1 = THTensor_(size)(t, 1);
  t_stride_0 = THTensor_(stride)(t, 0);
  t_stride_1 = THTensor_(stride)(t, 1);
  r__stride_0 = THTensor_(stride)(r_, 0);
  r__stride_1 = THTensor_(stride)(r_, 1);
  r__data = THTensor_(data)(r_);
  t_data = THTensor_(data)(t);

  for(r = 0; r < t_size_0; r++)
  {
    /* Columns [max(0, r+k), size) are kept; columns [0, sz) are zeroed. */
    long sz = THMin(r+k, t_size_1);
    for(c = THMax(0, r+k); c < t_size_1; c++)
      r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
    for(c = 0; c < sz; c++)
      r__data[r*r__stride_0+c*r__stride_1] = 0;
  }
}
/* Concatenate ta and tb along `dimension`; two-tensor convenience wrapper
   over the general catArray implementation. */
void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension)
{
  THTensor *pair[2] = { ta, tb };
  THTensor_(catArray)(r_, pair, 2, dimension);
}
/* Concatenate numInputs tensors into result along `dimension`.  Sizes along
   the cat dimension accumulate; every other dimension must agree across
   non-empty inputs.  Empty tensors are allowed and skipped.  A contiguous
   fast path memcpy's when catting along dimension 0. */
void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int dimension)
{
  THLongStorage *size;
  int i, j;
  long offset;
  int maxDim = dimension + 1;
  int allEmpty = 1;
  int allContiguous = 1;

  // cat_dimension is the actual dimension we cat along
  int cat_dimension = dimension;

  for (i = 0; i < numInputs; i++)
  {
    maxDim = THMax(maxDim, inputs[i]->nDimension);
  }

  // When the user input dimension is -1 (i.e. -2 in C)
  // Then we pick the maximum last dimension across all tensors.
  if ( dimension + TH_INDEX_BASE == -1 )
  {
    cat_dimension = maxDim?(maxDim-1):0;
  }

  THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
  THArgCheck(cat_dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE);

  size = THLongStorage_newWithSize(maxDim);

  /* Compute the output shape, validating non-cat dimensions as we go. */
  for(i = 0; i < maxDim; i++)
  {
    // dimSize is either the size of the dim if it exists, either 1 if #dim > 0, otherwise 0
    long dimSize = i < inputs[0]->nDimension ? inputs[0]->size[i] : THMin(inputs[0]->nDimension, 1);
    if (i == cat_dimension)
    {
      for (j = 1; j < numInputs; j++)
      {
        // accumulate the size over the dimension we want to cat on.
        // Empty tensors are allowed
        dimSize += i < inputs[j]->nDimension ? inputs[j]->size[i] : THMin(inputs[j]->nDimension, 1);
      }
    }
    else
    {
      for (j = 1; j < numInputs; j++)
      {
        long sz = (i < inputs[j]->nDimension ? inputs[j]->size[i] : THMin(inputs[j]->nDimension, 1));
        // If it's a dimension we're not catting on
        // Then fail if sizes are different AND > 0
        if (dimSize != sz && dimSize && sz)
        {
          THLongStorage_free(size);
          THError("inconsistent tensor sizes");
        }
        else if(!dimSize)
        {
          dimSize = sz;
        }
      }
    }
    allEmpty = allEmpty && !dimSize;
    size->data[i] = dimSize;
  }

  // Initiate catting and resizing
  // If at least one of the input is not empty
  if (!allEmpty)
  {
    THTensor_(resize)(result, size, NULL);

    // Check contiguity of all inputs and result
    for (i = 0; i < numInputs; i++) {
      if(inputs[i]->nDimension) {
        allContiguous = allContiguous && THTensor_(isContiguous)(inputs[i]);
      }
    }
    allContiguous = allContiguous && THTensor_(isContiguous)(result);

    // First path is for contiguous inputs along dim 1
    // Second path for non-contiguous
    if (cat_dimension == 0 && allContiguous)
    {
      /* Fast path: raw memcpy of each input's flat data, back to back. */
      real* result_data = result->storage->data + result->storageOffset;
      offset = 0;
      for (j = 0; j < numInputs; j++)
      {
        if (inputs[j]->nDimension)
        {
          THTensor* input0 = inputs[j];
          real* input0_data = input0->storage->data + input0->storageOffset;
          long input0_size = THTensor_(nElement)(input0);
          memcpy(result_data + offset, input0_data, input0_size*sizeof(real));
          offset += input0_size;
        }
      }
    }
    else
    {
      /* General path: copy each input into a narrowed view of result. */
      offset = 0;
      for (j = 0; j < numInputs; j++)
      {
        if (inputs[j]->nDimension)
        {
          long dimSize = cat_dimension < inputs[j]->nDimension ? inputs[j]->size[cat_dimension] : 1;
          THTensor *nt = THTensor_(newWithTensor)(result);
          THTensor_(narrow)(nt, NULL, cat_dimension, offset, dimSize);
          THTensor_(copy)(nt, inputs[j]);
          THTensor_(free)(nt);
          offset += dimSize;
        }
      }
    }
  }
  THLongStorage_free(size);
}
/* Returns 1 iff ta and tb have the same shape and identical elements
   (compared with ==, so NaN entries make tensors unequal). */
int THTensor_(equal)(THTensor *ta, THTensor* tb)
{
  int equal = 1;
  if(!THTensor_(isSameSizeAs)(ta, tb))
    return 0;

  if (THTensor_(isContiguous)(ta) && THTensor_(isContiguous)(tb)) {
    /* Contiguous fast path: compare flat data directly. */
    real *tap = THTensor_(data)(ta);
    real *tbp = THTensor_(data)(tb);
    ptrdiff_t sz = THTensor_(nElement)(ta);
    ptrdiff_t i;
    for (i=0; i<sz; ++i){
      if(tap[i] != tbp[i]) return 0;
    }
  } else {
    // Short-circuit the apply function on inequality
    /* TH_TENSOR_APPLY_hasFinished is internal state of the APPLY2 macro;
       setting it plus break aborts the traversal early. */
    TH_TENSOR_APPLY2(real, ta, real, tb,
                     if (equal && *ta_data != *tb_data) {
                       equal = 0;
                       TH_TENSOR_APPLY_hasFinished = 1; break;
                     })
  }
  return equal;
}
/* Generates the four elementwise comparison entry points for operator OP:
   NAME##Value  : tensor OP scalar -> 0/1 byte mask
   NAME##ValueT : same, result stored in the tensor's own real type
   NAME##Tensor : tensor OP tensor -> 0/1 byte mask
   NAME##TensorT: same, result in real type
   Fix: the stray line-continuation backslash after the final closing brace
   spliced the first instantiation line into the macro definition. */
#define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \
  void THTensor_(NAME##Value)(THByteTensor *r_, THTensor* t, real value) \
  { \
    THByteTensor_resizeNd(r_, t->nDimension, t->size, NULL); \
    TH_TENSOR_APPLY2(unsigned char, r_, real, t, \
                     *r__data = (*t_data OP value) ? 1 : 0;); \
  } \
  void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \
  { \
    THTensor_(resizeNd)(r_, t->nDimension, t->size, NULL); \
    TH_TENSOR_APPLY2(real, r_, real, t, \
                     *r__data = (*t_data OP value) ? 1 : 0;); \
  } \
  void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \
  { \
    THByteTensor_resizeNd(r_, ta->nDimension, ta->size, NULL); \
    TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \
                     *r__data = (*ta_data OP *tb_data) ? 1 : 0;); \
  } \
  void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \
  { \
    THTensor_(resizeNd)(r_, ta->nDimension, ta->size, NULL); \
    TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \
                     *r__data = (*ta_data OP *tb_data) ? 1 : 0;); \
  }

TENSOR_IMPLEMENT_LOGICAL(lt,<)
TENSOR_IMPLEMENT_LOGICAL(gt,>)
TENSOR_IMPLEMENT_LOGICAL(le,<=)
TENSOR_IMPLEMENT_LOGICAL(ge,>=)
TENSOR_IMPLEMENT_LOGICAL(eq,==)
TENSOR_IMPLEMENT_LOGICAL(ne,!=)
// LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC): generate an element-wise unary
// kernel THTensor_(NAME) computing r_[i] = CFUNC(t[i]).
// NOTE: the final line of the macro must NOT end with a continuation
// backslash; with one, the `#if` directive below would be spliced into the
// macro body, which is invalid preprocessor usage.
#define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t) \
{ \
  THTensor_(resizeAs)(r_, t); \
  TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \
}

#if defined(TH_REAL_IS_LONG)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,labs)
LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
#endif /* long only part */

#if defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs)
LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
#endif /* int only part */
#if defined(TH_REAL_IS_BYTE)
// TENSOR_IMPLEMENT_LOGICAL_SUM(NAME, OP, INIT_VALUE): whole-tensor boolean
// reduction with operator OP, starting the accumulator at INIT_VALUE.
#define TENSOR_IMPLEMENT_LOGICAL_SUM(NAME, OP, INIT_VALUE) \
int THTensor_(NAME)(THTensor *tensor) \
{ \
THArgCheck(tensor->nDimension > 0, 1, "empty Tensor"); \
int sum = INIT_VALUE; \
TH_TENSOR_APPLY(real, tensor, sum = sum OP *tensor_data;); \
return sum; \
}
// logicalall: 1 iff every element is non-zero; logicalany: 1 iff any is.
TENSOR_IMPLEMENT_LOGICAL_SUM(logicalall, &&, 1)
TENSOR_IMPLEMENT_LOGICAL_SUM(logicalany, ||, 0)
#endif /* Byte only part */
/* floating point only now */
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
// TH_MATH_NAME(fn) resolves to the float variant (fn##f, e.g. sqrtf) when
// `real` is float, and to the plain double function otherwise.
#if defined (TH_REAL_IS_FLOAT)
#define TH_MATH_NAME(fn) fn##f
#else
#define TH_MATH_NAME(fn) fn
#endif
// Element-wise math kernels for floating-point tensors, each generated from
// the matching <math.h> (or TH-provided) function.
LAB_IMPLEMENT_BASIC_FUNCTION(log,TH_MATH_NAME(log))
LAB_IMPLEMENT_BASIC_FUNCTION(lgamma,TH_MATH_NAME(lgamma))
LAB_IMPLEMENT_BASIC_FUNCTION(log1p,TH_MATH_NAME(log1p))
LAB_IMPLEMENT_BASIC_FUNCTION(sigmoid,TH_MATH_NAME(TH_sigmoid))
LAB_IMPLEMENT_BASIC_FUNCTION(exp,TH_MATH_NAME(exp))
LAB_IMPLEMENT_BASIC_FUNCTION(cos,TH_MATH_NAME(cos))
LAB_IMPLEMENT_BASIC_FUNCTION(acos,TH_MATH_NAME(acos))
LAB_IMPLEMENT_BASIC_FUNCTION(cosh,TH_MATH_NAME(cosh))
LAB_IMPLEMENT_BASIC_FUNCTION(sin,TH_MATH_NAME(sin))
LAB_IMPLEMENT_BASIC_FUNCTION(asin,TH_MATH_NAME(asin))
LAB_IMPLEMENT_BASIC_FUNCTION(sinh,TH_MATH_NAME(sinh))
LAB_IMPLEMENT_BASIC_FUNCTION(tan,TH_MATH_NAME(tan))
LAB_IMPLEMENT_BASIC_FUNCTION(atan,TH_MATH_NAME(atan))
LAB_IMPLEMENT_BASIC_FUNCTION(tanh,TH_MATH_NAME(tanh))
LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,TH_MATH_NAME(sqrt))
LAB_IMPLEMENT_BASIC_FUNCTION(rsqrt,TH_MATH_NAME(TH_rsqrt))
LAB_IMPLEMENT_BASIC_FUNCTION(ceil,TH_MATH_NAME(ceil))
LAB_IMPLEMENT_BASIC_FUNCTION(floor,TH_MATH_NAME(floor))
LAB_IMPLEMENT_BASIC_FUNCTION(round,TH_MATH_NAME(round))
LAB_IMPLEMENT_BASIC_FUNCTION(abs,TH_MATH_NAME(fabs))
LAB_IMPLEMENT_BASIC_FUNCTION(trunc,TH_MATH_NAME(trunc))
LAB_IMPLEMENT_BASIC_FUNCTION(frac,TH_MATH_NAME(TH_frac))
LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
// cinv: element-wise reciprocal (1/x).
LAB_IMPLEMENT_BASIC_FUNCTION(cinv, TH_MATH_NAME(1.0) / )
// THTensor_(pow): r_[i] = t[i] ^ value, with fast paths for common
// exponents (1, 2, 3, 0.5, -0.5, -1, -2) that avoid the generic pow call.
void THTensor_(pow)(THTensor *r_, THTensor *t, real value)
{
THTensor_(resizeAs)(r_, t);
if(value == 1){
// x^1: plain copy.
THTensor_(copy)(r_, t);
}
else if(value == 2){
// x^2: element-wise self-multiply.
THTensor_(cmul)(r_, t, t);
}
else if(value == 3){
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = *t_data * *t_data * *t_data;);
}
else if(value == 0.5){
THTensor_(sqrt)(r_, t);
}
else if(value == -0.5){
THTensor_(rsqrt)(r_, t);
}
else if(value == -1){
THTensor_(cinv)(r_, t);
}
else if(value == -2){
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = TH_MATH_NAME(1.0) / (*t_data * *t_data););
}
else{
// General case: precision-matched pow (powf for float builds).
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = TH_MATH_NAME(pow)(*t_data, value););
}
}
// THTensor_(atan2): r_[i] = atan2(tx[i], ty[i]), element-wise two-argument
// arctangent. r_ is resized to match tx.
void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty)
{
THTensor_(resizeAs)(r_, tx);
TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = TH_MATH_NAME(atan2)(*tx_data,*ty_data););
}
// THTensor_(lerp): element-wise linear interpolation
// r_[i] = a[i] + weight * (b[i] - a[i]). a and b must have the same number
// of elements.
void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, real weight)
{
THArgCheck(THTensor_(nElement)(a) == THTensor_(nElement)(b), 2, "sizes do not match");
THTensor_(resizeAs)(r_, a);
TH_TENSOR_APPLY3(real, r_, real, a, real, b, *r__data = TH_MATH_NAME(TH_lerp)(*a_data, *b_data, weight););
}
// THTensor_(mean): mean of t along `dimension`, implemented as
// sum(dim) / size(dim). keepdim is forwarded to sum, which handles the
// squeeze of the reduced dimension.
void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
dimension + TH_INDEX_BASE);
THTensor_(sum)(r_, t, dimension, keepdim);
THTensor_(div)(r_, r_, t->size[dimension]);
}
// THTensor_(std): standard deviation of t along `dimension`.
// biased != 0 -> divide by N (population); biased == 0 -> divide by N-1
// (sample estimator, Bessel's correction). Accumulation is done in accreal
// for precision; the variance is clamped at 0 before sqrt to guard against
// negative round-off.
void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
// Result keeps the reduced dimension at size 1 (squeezed below unless
// keepdim is set).
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
accreal sum2 = 0;
long i;
for(i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
sum += z;
sum2 += z*z;
}
if(biased)
{
// Var = E[x^2] - E[x]^2
sum /= t_size;
sum2 /= t_size;
sum2 -= sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)TH_MATH_NAME(sqrt)(sum2);
}
else
{
// Unbiased: sum2/(N-1) - N/(N-1) * mean^2
sum /= t_size;
sum2 /= t_size-1;
sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)TH_MATH_NAME(sqrt)(sum2);
});
if (!keepdim) {
THTensor_(squeeze1d)(r_, r_, dimension);
}
}
// THTensor_(var): variance of t along `dimension`; same estimator choice as
// THTensor_(std) (biased -> /N, unbiased -> /(N-1)) but without the final
// square root.
void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
// Result keeps the reduced dimension at size 1 (squeezed below unless
// keepdim is set).
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
accreal sum2 = 0;
long i;
for(i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
sum += z;
sum2 += z*z;
}
if(biased)
{
// Var = E[x^2] - E[x]^2, clamped at 0 against round-off.
sum /= t_size;
sum2 /= t_size;
sum2 -= sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = sum2;
}
else
{
sum /= t_size;
sum2 /= t_size-1;
sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)sum2;
});
if (!keepdim) {
THTensor_(squeeze1d)(r_, r_, dimension);
}
}
// THTensor_(norm): p-norm of t along `dimension`.
// value == 0 counts non-zero entries (the "0-norm"); otherwise computes
// (sum |x|^p)^(1/p).
void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension, int keepdim)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d",
dimension + TH_INDEX_BASE);
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
if(value == 0) {
// 0-norm: number of non-zero elements along the dimension.
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride] != 0.0;
*r__data = sum;)
} else {
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++) {
sum += TH_MATH_NAME(pow)(
TH_MATH_NAME(fabs)(t_data[i*t_stride]), value);
}
*r__data = TH_MATH_NAME(pow)(sum, 1.0/value);)
}
if (!keepdim) {
THTensor_(squeeze1d)(r_, r_, dimension);
}
}
// THTensor_(normall): p-norm over the whole tensor, with fast paths for
// p = 0 (count of non-zeros), p = 1 (sum of |x|) and p = 2 (Euclidean norm).
accreal THTensor_(normall)(THTensor *tensor, real value)
{
accreal sum = 0;
if(value == 0) {
TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;);
return sum;
} else if(value == 1) {
TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(fabs)(*tensor_data););
return sum;
} else if(value == 2) {
// Accumulate squares in accreal, then take the (double) sqrt.
TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;);
return sqrt(sum);
} else {
TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*tensor_data), value););
return TH_MATH_NAME(pow)(sum, 1.0/value);
}
}
// THTensor_(renorm): for every slice of src along `dimension`, compute its
// p-norm (p = value) and, if it exceeds maxnorm, rescale the slice so its
// norm becomes (approximately) maxnorm; otherwise copy it unchanged.
// Fix: the p==1 accumulation and the final root now use the precision-matched
// TH_MATH_NAME(fabs)/TH_MATH_NAME(pow) variants (fabsf/powf on float builds)
// instead of the double-precision fabs/pow, consistent with the other norm
// kernels in this file.
void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, real maxnorm)
{
  int i;
  THTensor *rowR, *rowS;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(src), 3, "invalid dimension %d",
      dimension + TH_INDEX_BASE);
  THArgCheck(value > 0, 2, "non-positive-norm not supported");
  THArgCheck(THTensor_(nDimension)(src) > 1, 1, "need at least 2 dimensions, got %d dimensions",
      THTensor_(nDimension)(src));

  rowR = THTensor_(new)();
  rowS = THTensor_(new)();

  THTensor_(resizeAs)(res, src);

  for (i=0; i<src->size[dimension]; i++)
  {
    real norm = 0;
    real new_norm;

    THTensor_(select)(rowS, src, dimension, i);
    THTensor_(select)(rowR, res, dimension, i);
    if (value == 1) {
      TH_TENSOR_APPLY(real, rowS, norm += TH_MATH_NAME(fabs)(*rowS_data););
    } else if (value == 2) {
      TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;);
    } else {
      TH_TENSOR_APPLY(real, rowS, norm += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*rowS_data), value););
    }

    norm = TH_MATH_NAME(pow)(norm, 1/value);

    if (norm > maxnorm)
    {
      // 1e-7 guards against division by a vanishing norm.
      new_norm = maxnorm / (norm + 1e-7);

      TH_TENSOR_APPLY2(
        real, rowR, real, rowS,
        *rowR_data = (*rowS_data) * new_norm;
      )
    }
    else
      THTensor_(copy)(rowR, rowS);
  }

  THTensor_(free)(rowR);
  THTensor_(free)(rowS);
}
// THTensor_(dist): p-norm distance ||tensor - src||_p with p = value.
// Fix: the running sum is now accumulated in accreal (double on float
// builds) to match the declared return type; the previous `real sum`
// silently rounded every partial sum to single precision.
accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value)
{
  accreal sum = 0;
  TH_TENSOR_APPLY2(real, tensor, real, src,
                   sum += TH_MATH_NAME(pow)(
                       TH_MATH_NAME(fabs)(*tensor_data - *src_data), value););
  return TH_MATH_NAME(pow)(sum, 1.0/value);
}
// THTensor_(meanall): mean over all elements; rejects empty tensors.
accreal THTensor_(meanall)(THTensor *tensor)
{
THArgCheck(tensor->nDimension > 0, 1, "empty Tensor");
return THTensor_(sumall)(tensor)/THTensor_(nElement)(tensor);
}
// THTensor_(varall): variance over all elements, computed as the mean of
// squared deviations; biased -> /N, unbiased -> /(N-1).
accreal THTensor_(varall)(THTensor *tensor, int biased)
{
accreal mean = THTensor_(meanall)(tensor);
accreal sum = 0;
TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean););
sum /= THTensor_(nElement)(tensor) - (biased ? 0 : 1);
return sum;
}
// THTensor_(stdall): standard deviation over all elements (sqrt of varall).
accreal THTensor_(stdall)(THTensor *tensor, int biased)
{
return sqrt(THTensor_(varall)(tensor, biased));
}
// THTensor_(linspace): fill r_ with n evenly spaced values from a to b
// inclusive. n == 1 requires a == b and stores the single value a.
void THTensor_(linspace)(THTensor *r_, real a, real b, long n)
{
// i advances by one per visited element inside the apply macro.
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THTensor_(nElement)(r_) != n) {
THTensor_(resize1d)(r_, n);
}
if(n == 1) {
THTensor_(set1d)(r_, 0, a);
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = a + i*(b-a)/((real)(n-1));
i++;
);
}
}
// THTensor_(logspace): fill r_ with n values 10^x for x evenly spaced from
// a to b inclusive. n == 1 requires a == b and stores 10^a.
void THTensor_(logspace)(THTensor *r_, real a, real b, long n)
{
// i advances by one per visited element inside the apply macro.
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THTensor_(nElement)(r_) != n) {
THTensor_(resize1d)(r_, n);
}
if(n == 1) {
THTensor_(set1d)(r_, 0, TH_MATH_NAME(pow)(10.0, a));
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = TH_MATH_NAME(pow)(10.0, a + i*(b-a)/((real)(n-1)));
i++;
);
}
}
// THTensor_(rand): resize r_ to `size` and fill with uniform samples in
// [0, 1) drawn from _generator.
void THTensor_(rand)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(uniform)(r_, _generator, 0, 1);
}
// THTensor_(randn): resize r_ to `size` and fill with standard-normal
// samples (mean 0, stddev 1) drawn from _generator.
void THTensor_(randn)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(normal)(r_, _generator, 0, 1);
}
// THTensor_(histc): histogram of `tensor` into `nbins` equal-width bins over
// [minvalue, maxvalue]. If minvalue == maxvalue the range is taken from the
// data's min/max; a degenerate (still equal) range is widened by +/-1.
// Values outside the range are ignored; the top edge falls into the last bin.
void THTensor_(histc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue)
{
real minval;
real maxval;
real *h_data;
THTensor_(resize1d)(hist, nbins);
THTensor_(zero)(hist);
minval = minvalue;
maxval = maxvalue;
if (minval == maxval)
{
minval = THTensor_(minall)(tensor);
maxval = THTensor_(maxall)(tensor);
}
if (minval == maxval)
{
minval = minval - 1;
maxval = maxval + 1;
}
h_data = THTensor_(data)(hist);
TH_TENSOR_APPLY(real, tensor,
if (*tensor_data >= minval && *tensor_data <= maxval) {
// THMin clamps *tensor_data == maxval into the last bin.
const int bin = (int)((*tensor_data-minval) / (maxval-minval) * nbins);
h_data[THMin(bin, nbins-1)] += 1;
}
);
}
// THTensor_(bhistc): batched histogram — one histogram per row of a 2-d
// input, accumulated along dimension 1 into hist of shape
// (tensor->size[0], nbins). Range handling matches THTensor_(histc).
// NOTE(review): the check `nDimension < 3` also admits 1-d input despite the
// "must be a 2d tensor" message — confirm intended behavior with callers.
void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue)
{
THArgCheck(THTensor_(nDimension)(tensor) < 3, 2, "invalid dimension %d, the input must be a 2d tensor", THTensor_(nDimension)(tensor));
int dimension = 1;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(tensor), 2, "invalid dimension %d",
dimension + TH_INDEX_BASE);
real minval;
real maxval;
real *h_data;
THTensor_(resize2d)(hist, tensor->size[0], nbins);
THTensor_(zero)(hist);
minval = minvalue;
maxval = maxvalue;
if (minval == maxval)
{
// Degenerate range: derive from global (not per-row) min/max.
minval = THTensor_(minall)(tensor);
maxval = THTensor_(maxall)(tensor);
}
if (minval == maxval)
{
minval = minval - 1;
maxval = maxval + 1;
}
TH_TENSOR_DIM_APPLY2(real, tensor, real, hist, dimension, long i;
for(i = 0; i < tensor_size; i++)
{
if(tensor_data[i*tensor_stride] >= minval && tensor_data[i*tensor_stride] <= maxval) {
const int bin = (int)((tensor_data[i*tensor_stride]-minval) / (maxval-minval) * nbins);
hist_data[THMin(bin, nbins-1)] += 1;
}
}
);
}
#undef TH_MATH_NAME
#endif /* floating point only part */
#undef IS_NONZERO
#endif
|
activation.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_OPS_ACTIVATION_H_
#define MACE_OPS_ACTIVATION_H_
#include <algorithm>
#include <cmath>
#include <string>
#include "mace/core/types.h"
#include "mace/ops/arm/activation_neon.h"
#include "mace/utils/logging.h"
namespace mace {
namespace ops {
// Activation kinds supported by the MACE ops in this header; values are
// explicit so they stay stable for serialized models.
enum ActivationType {
NOOP = 0,
RELU = 1,
RELUX = 2,
PRELU = 3,
TANH = 4,
SIGMOID = 5
};
// Map an activation name ("RELU", "RELUX", "PRELU", "TANH", "SIGMOID",
// "NOOP") to its ActivationType. Unknown names abort via LOG(FATAL); the
// trailing return only silences compiler warnings.
inline ActivationType StringToActivationType(const std::string type) {
  struct NameToType {
    const char *name;
    ActivationType value;
  };
  static const NameToType kMapping[] = {
      {"RELU", ActivationType::RELU},
      {"RELUX", ActivationType::RELUX},
      {"PRELU", ActivationType::PRELU},
      {"TANH", ActivationType::TANH},
      {"SIGMOID", ActivationType::SIGMOID},
      {"NOOP", ActivationType::NOOP},
  };
  for (const NameToType &entry : kMapping) {
    if (type == entry.name) {
      return entry.value;
    }
  }
  LOG(FATAL) << "Unknown activation type: " << type;
  return ActivationType::NOOP;
}
// Apply the chosen activation element-wise to `size` values from input_ptr
// into output_ptr (in-place is allowed when both pointers coincide —
// presumably; confirm with callers). relux_max_limit is only used by RELUX.
// Half-precision is rejected up front via MACE_CHECK.
template <typename T>
void DoActivation(const T *input_ptr,
T *output_ptr,
const index_t size,
const ActivationType type,
const float relux_max_limit) {
MACE_CHECK(DataTypeToEnum<T>::value != DataType::DT_HALF);
switch (type) {
case NOOP:
break;
case RELU:
#pragma omp parallel for
for (index_t i = 0; i < size; ++i) {
output_ptr[i] = std::max(input_ptr[i], static_cast<T>(0));
}
break;
case RELUX:
// Clamp to [0, relux_max_limit].
#pragma omp parallel for
for (index_t i = 0; i < size; ++i) {
output_ptr[i] = std::min(std::max(input_ptr[i], static_cast<T>(0)),
static_cast<T>(relux_max_limit));
}
break;
case TANH:
#pragma omp parallel for
for (index_t i = 0; i < size; ++i) {
output_ptr[i] = std::tanh(input_ptr[i]);
}
break;
case SIGMOID:
#pragma omp parallel for
for (index_t i = 0; i < size; ++i) {
output_ptr[i] = 1 / (1 + std::exp(-input_ptr[i]));
}
break;
default:
// PRELU is handled by PReLUActivation, not here.
LOG(FATAL) << "Unknown activation type: " << type;
}
}
// float specialization: RELU and RELUX dispatch to the NEON-optimized
// kernels; TANH and SIGMOID fall back to the scalar OpenMP loops.
template<>
inline void DoActivation(const float *input_ptr,
float *output_ptr,
const index_t size,
const ActivationType type,
const float relux_max_limit) {
switch (type) {
case NOOP:
break;
case RELU:
ReluNeon(input_ptr, size, output_ptr);
break;
case RELUX:
ReluxNeon(input_ptr, relux_max_limit, size, output_ptr);
break;
case TANH:
#pragma omp parallel for
for (index_t i = 0; i < size; ++i) {
output_ptr[i] = std::tanh(input_ptr[i]);
}
break;
case SIGMOID:
#pragma omp parallel for
for (index_t i = 0; i < size; ++i) {
output_ptr[i] = 1 / (1 + std::exp(-input_ptr[i]));
}
break;
default:
// PRELU is handled by PReLUActivation, not here.
LOG(FATAL) << "Unknown activation type: " << type;
}
}
// Parametric ReLU: out = in for in >= 0, out = alpha[channel] * in otherwise.
// Input is addressed as (outer_size, input_chan, inner_size) in row-major
// order; alpha_ptr holds one slope per channel. Iterations are independent,
// so the triple loop is collapsed for OpenMP.
template <typename T>
void PReLUActivation(const T *input_ptr,
const index_t outer_size,
const index_t input_chan,
const index_t inner_size,
const T *alpha_ptr,
T *output_ptr) {
#pragma omp parallel for collapse(3)
for (index_t i = 0; i < outer_size; ++i) {
for (index_t chan_idx = 0; chan_idx < input_chan; ++chan_idx) {
for (index_t j = 0; j < inner_size; ++j) {
index_t idx = i * input_chan * inner_size + chan_idx * inner_size + j;
if (input_ptr[idx] < 0) {
output_ptr[idx] = input_ptr[idx] * alpha_ptr[chan_idx];
} else {
output_ptr[idx] = input_ptr[idx];
}
}
}
}
}
} // namespace ops
} // namespace mace
#endif // MACE_OPS_ACTIVATION_H_
|
jacobi1d_optimized_omp.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define NUM_THREADS 4
/* --
* Do nsweeps sweeps of Jacobi iteration on a 1D Poisson problem
*
* -u'' = f
*
* discretized by n+1 equally spaced mesh points on [0,1].
* u is subject to Dirichlet boundary conditions specified in
* the u[0] and u[n] entries of the initial vector.
*/
/* --
 * Do nsweeps sweeps of Jacobi iteration on the 1D Poisson problem -u'' = f,
 * discretized on n+1 equally spaced points of [0,1]; u[0] and u[n] carry the
 * Dirichlet boundary values. Sweeps are processed in pairs (u -> utmp, then
 * utmp -> u), so an odd nsweeps is rounded up to the next even count — same
 * as the previous version.
 *
 * Fixes: the old code put `#pragma omp parallel for` on the SWEEP loop,
 * although sweep k+2 depends on sweep k (a loop-carried dependence -> data
 * race), and fused the two half-sweeps into one racy inner loop. Each
 * half-sweep below is independent across i and safely parallelized on its
 * own; sweeps remain sequential.
 */
void jacobi(int nsweeps, int n, double* u, double* f)
{
    int i, sweep;
    double h  = 1.0 / n;
    double h2 = h*h;
    double* utmp = (double*) malloc( (n+1) * sizeof(double) );
    if (!utmp)
        return;  /* allocation failure: leave u untouched */

    /* Fill boundary conditions into utmp; interior entries are always
     * written before they are read. */
    utmp[0] = u[0];
    utmp[n] = u[n];

    for (sweep = 0; sweep < nsweeps; sweep += 2) {
        /* Half-sweep 1: old data in u, new data in utmp. */
        #pragma omp parallel for
        for (i = 1; i < n; i++)
            utmp[i] = (u[i-1] + u[i+1] + h2*f[i])/2;

        /* Half-sweep 2: old data in utmp, new data in u. */
        #pragma omp parallel for
        for (i = 1; i < n; i++)
            u[i] = (utmp[i-1] + utmp[i+1] + h2*f[i])/2;
    }

    free(utmp);
}
/* Dump the sampled solution to fname: one "x u(x)" pair per line for the
 * n+1 mesh points x = i/n. */
void write_solution(int n, double* u, const char* fname)
{
    double h = 1.0 / n;
    FILE* fp = fopen(fname, "w+");
    int i = 0;
    while (i <= n) {
        fprintf(fp, "%g %g\n", i*h, u[i]);
        ++i;
    }
    fclose(fp);
}
/* Entry point: parse (n, nsteps, output-file) from argv, build the mesh and
 * right-hand side f(x) = x, run the Jacobi solver, and optionally write the
 * solution. Fixes: omp_set_num_threads was called without <omp.h> in scope
 * (now guarded by _OPENMP together with the guarded include), the two
 * mallocs are checked, and the duplicated "Run the solver" comment before
 * the printf is corrected. */
int main(int argc, char** argv)
{
    int i;
    int n, nsteps;
    double* u;
    double* f;
    double h;
    char* fname;

    /* Process arguments */
    n      = (argc > 1) ? atoi(argv[1]) : 100;
    nsteps = (argc > 2) ? atoi(argv[2]) : 100;
    fname  = (argc > 3) ? argv[3] : NULL;
    h      = 1.0/n;

    /* Thread count only matters when built with OpenMP support. */
#ifdef _OPENMP
    omp_set_num_threads(NUM_THREADS);
#endif

    /* Allocate and initialize arrays */
    u = (double*) malloc( (n+1) * sizeof(double) );
    f = (double*) malloc( (n+1) * sizeof(double) );
    if (!u || !f) {
        fprintf(stderr, "allocation failure\n");
        free(u);
        free(f);
        return 1;
    }
    memset(u, 0, (n+1) * sizeof(double));
    for (i = 0; i <= n; ++i)
        f[i] = i * h;

    /* Run the solver */
    jacobi(nsteps, n, u, f);

    /* Report the problem parameters */
    printf("n: %d\n"
           "nsteps: %d\n",
           n, nsteps);

    /* Write the results */
    if (fname)
        write_solution(n, u, fname);

    free(f);
    free(u);
    return 0;
}
|
GB_unaryop__ainv_uint64_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint64_fp32
// op(A') function: GB_tran__ainv_uint64_fp32
// C type: uint64_t
// A type: float
// cast: uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop: cij = -aij
#define GB_ATYPE \
float
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z ; GB_CAST_UNSIGNED(z,x,64) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated kernel: Cx [p] = (uint64_t) (-Ax [p]) for all p, with the
// float -> uint64 cast done by GB_CAST_UNSIGNED (via GB_CAST_OP). Work is
// split statically across nthreads; returns GrB_NO_VALUE when the operator
// or either type is disabled at compile time (GB_DISABLE).
GrB_Info GB_unop__ainv_uint64_fp32
(
uint64_t *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated kernel: C = op (cast (A')) — transpose A, typecast float to
// uint64, and apply AINV (negation). The actual loop lives in the shared
// template GB_unaryop_transpose.c, instantiated here in its phase-2 form.
GrB_Info GB_tran__ainv_uint64_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
polynomial.h | #ifndef MATH_POLYNOMIAL_H
#define MATH_POLYNOMIAL_H
#include "alias.h"
#include "trivial.h"
#include "quartz_internal/error.h"
#include "quartz_internal/util/member_function_wrapper.h"
#include "quartz_internal/util/type_converter.h"
#include "quartz_internal/details/math/space.h"
namespace math {
namespace polynomial {
// Term represents the each term a polynomial will have.
// Term represents a single monomial: coef * prod_i x_i^exponents(i).
// Fix: the mixed-derivative loop in derivative(const arma::uvec &) was
// decorated with `#pragma omp parallel for` although every iteration reads
// and writes `result` (a strict sequential dependence -> data race); the
// pragma has been removed. All other behavior is unchanged.
template<typename T>
struct Term {
  T coef;          // scalar coefficient
  lvec exponents;  // per-dimension integer exponents

  // Evaluate the term at `position`; throws on dimension mismatch.
  template<typename U>
  std::common_type_t<T, U> at(const arma::Col<U> & position) const {
    if (position.n_elem != this->exponents.n_elem) {
      throw Error(
          "Different dimension between the position and polynomial term");
    }
    auto result = std::common_type_t<T, U>(1.0);
    for (arma::uword i = 0; i < position.n_elem; i++) {
      if (this->exponents(i) == 0)
        continue;
      result *= std::pow(position(i), this->exponents(i));
    }
    return this->coef * result;
  }

  inline
  Term() :
      coef(0.0),
      exponents() {}

  // Zero term over `dim` variables (all exponents zero).
  explicit
  inline
  Term(const arma::uword dim, const T coef = T{0.0}) :
      coef(coef),
      exponents(arma::zeros<lvec>(dim)) {}

  inline
  Term(const T coef, const lvec & indices) :
      coef(coef),
      exponents(indices) {}

  inline
  Term(const T coef, const arma::uvec & indices) :
      coef(coef),
      exponents(arma::conv_to<lvec>::from(indices)) {}

  // Number of variables this term ranges over.
  arma::uword dim() const {
    return this->exponents.n_elem;
  }

  // Substitute x_i -> scaling_i * x_i: multiplies the coefficient by
  // prod_i scaling_i^exponents(i) (evaluated via at()).
  template<typename U>
  Term<std::common_type_t<T, U>>
  scale(const arma::Col<U> & scaling) const {
    return {this->at(scaling), this->exponents};
  }

  // True when both terms carry identical exponent vectors; coefficients are
  // not compared.
  template<typename U>
  bool is_same_term(const Term<U> & term) const {
    if (this->exponents == term.exponents) return true;
    return false;
  }

  // Partial derivative with respect to coordinate `index`.
  inline
  Term<T> derivative(const arma::uword index) const {
    if (this->exponents(index) == 0) {
      return {T{0.}, arma::zeros<lvec>(this->dim())};
    } else {
      lvec new_indices = this->exponents;
      new_indices(index) -= 1;
      // new_indices(index) + 1 is the original exponent, i.e. the factor
      // brought down by differentiation.
      return {this->coef * (double) (new_indices(index) + 1), new_indices};
    }
  }

  // Mixed partial derivative: index(i) is the derivative order along
  // coordinate i. Each step consumes the previous result, so this loop is
  // inherently sequential (do not parallelize).
  inline
  Term<T> derivative(const arma::uvec & index) const {
    if (index.n_elem != this->exponents.n_elem) {
      throw Error("Derivative operator out of bound");
    }
    Term<T> result = *this;
    for (arma::uword i = 0; i < index.n_elem; i++) {
      for (arma::uword j = 0; j < index(i); j++) {
        result = result.derivative(i);
      }
    }
    return result;
  }

  // Apply this monomial as a differential operator to `function`:
  // coef * d^{exponents} function. Negative exponents (integration) are
  // rejected.
  template<typename U>
  auto differentiate(const U & function) const {
    if (arma::min(this->exponents) < 0) {
      throw Error("Quartz does not support integration operator");
    }
    return quartz::derivative(function, arma::conv_to<arma::uvec>::from(
        this->exponents)) * this->coef;
  }

  // (coef * x^e)^power = coef^power * x^(e*power).
  inline
  Term<T> pow(const arma::uword power) const {
    return {std::pow(this->coef, power), this->exponents * power};
  }

  bool operator==(const Term<T> & term) const {
    return this->coef == term.coef && this->is_same_term(term);
  }

  Term& operator=(const Term &) = default;
};
} // namespace polynomial
// The Polynomial struct is stored as a list of exponents and the corresponding
// coefficients. The exponents are stored column-wise.
template<typename T>
struct Polynomial {
public:
arma::Col<T> coefs;
lmat exponents;
// Empty polynomial (no terms).
inline
Polynomial(void) :
coefs(),
exponents() {}
// Construct from parallel arrays: coefs(i) pairs with exponents.col(i).
inline
Polynomial(const arma::Col<T> & coefs, const lmat & exponents) :
coefs(coefs),
exponents(exponents) {
if (coefs.n_elem != exponents.n_cols) {
throw Error(
"the number between coefficients and the exponents is not consistent");
}
}
// Single-term polynomial.
inline
Polynomial(const polynomial::Term<T> & term) :
coefs(arma::Col<T>{term.coef}),
exponents(lmat(term.exponents)) {}
// Copy constructor.
inline
Polynomial(const Polynomial<T> & term) :
coefs(term.coefs),
exponents(term.exponents) {}
// Constant polynomial `coef` over `dim` variables.
inline
Polynomial(const arma::uword dim, const T coef = 0.0) :
coefs(arma::Col<T>{coef}),
exponents(arma::zeros<lmat>(dim, 1)) {}
// Return term `index` as a Term object; throws when out of range.
inline
polynomial::Term<T> term(arma::uword index) const {
if (index >= this->coefs.n_elem) {
throw Error("The specified polynomial term does not exist");
}
return polynomial::Term<T>{this->coefs(index), this->exponents.col(index)};
}
// Number of variables (rows of the exponent matrix).
inline
arma::uword dim() const {
return this->exponents.n_rows;
}
// Total degree: maximum column sum of the exponent matrix.
inline
long long grade() const {
return arma::max(arma::sum(this->exponents));
}
// Component-wise real part of the coefficients (exponents unchanged).
inline
Polynomial<double> real() const {
return Polynomial<double>(arma::real(this->coefs), this->exponents).clean();
}
// Component-wise imaginary part of the coefficients.
inline
Polynomial<double> imag() const {
return Polynomial<double>(arma::imag(this->coefs), this->exponents).clean();
}
// Component-wise magnitude of the coefficients.
inline
Polynomial<double> abs() const {
return Polynomial<double>(arma::abs(this->coefs), this->exponents).clean();
}
// Complex conjugate of the coefficients; a no-op for real polynomials.
inline
Polynomial<T> conj() const {
if constexpr(std::is_same<T,double>::value) {
return *this;
} else {
const arma::cx_vec new_coefs = arma::conj(this->coefs);
return Polynomial<T>(new_coefs, this->exponents);
}
}
// Evaluate the polynomial at `position` by summing each term's value;
// throws on dimension mismatch.
template<typename U>
std::common_type_t<T, U> at(const arma::Col<U> & position) const {
if (position.n_elem != this->exponents.n_rows) {
throw Error(
"Different dimension between the position and polynomial term");
};
auto result = std::common_type_t<T, U>(0.0);
for (arma::uword i = 0; i < this->exponents.n_cols; i++) {
const polynomial::Term<T> term = this->term(i);
result += term.at(position);
}
return result;
}
// Partial derivative along coordinate `index`, term by term, followed by a
// clean() to merge/drop degenerate terms.
inline
Polynomial<T> derivative(const arma::uword index) const {
if (index >= this->dim()) {
throw Error("Derivative operator out of bound");
}
Polynomial<T> result = Polynomial<T>(this->term(0).derivative(index));
for (arma::uword i = 1; i < this->coefs.n_elem; i++) {
result = result + this->term(i).derivative(index);
}
return result.clean();
}
// Mixed partial derivative: index(i) gives the derivative order along
// coordinate i. Fix: each step consumes the previous `result`, so the loop
// is strictly sequential; the former `#pragma omp parallel for` raced on
// `result` and has been removed.
inline
Polynomial<T> derivative(const arma::uvec & index) const {
  if (index.n_elem != this->dim()) {
    throw Error("Derivative operator out of bound");
  }
  Polynomial<T> result = *this;
  for (arma::uword i = 0; i < index.n_elem; i++) {
    for (arma::uword j = 0; j < index(i); j++) {
      result = result.derivative(i);
    }
  }
  return result.clean();
}
// Apply this polynomial as a differential operator to `function`:
// sum_i coef_i * d^{exponents_i} function.
// Fixes two defects: (1) the accumulation loop started at i = 0 although
// `result` was already seeded with term 0, so the first term was counted
// twice (compare derivative(uword), which starts at 1); (2) the loop was
// wrapped in `#pragma omp parallel for` despite every iteration reading and
// writing `result` — a data race. The loop now starts at 1 and runs
// sequentially.
template<typename U>
auto differentiate(const U & function) const {
  auto result = this->term(0).differentiate(function);
  for (arma::uword i = 1; i < this->coefs.n_elem; i++) {
    const polynomial::Term<T> term = this->term(i);
    result = result + term.differentiate(function);
  }
  return result;
}
// Argument shift: return p(x + displacement), expanded term by term with
// the binomial theorem. Fix: both accumulation loops (the per-term expansion
// and the outer sum over terms) fold into a shared `result`, so they are
// sequential by construction; the two `#pragma omp parallel for` pragmas
// that decorated them raced on `result` and have been removed.
template<typename U>
Polynomial<std::common_type_t<T, U>>
displace(const arma::Col<U> & displacement) const {
  if (this->dim() != displacement.n_elem) {
    throw Error(
        "Different dimension between the displacement and polynomial term");
  }
  const auto dim = this->dim();
  auto result =
      Polynomial<std::common_type_t<T, U>>(dim);
  // binomial(n, i) = n! / (i! (n - i)!)
  const auto binomial =
      [](const double n, const double i) -> double {
        return math::factorial(n) / factorial(i) / factorial(n - i);
      };
  // Expand a single monomial about the displaced origin: enumerate every
  // exponent combination below the term's exponents and weight it by the
  // matching binomial coefficients and powers of the displacement.
  const auto term_displace =
      [&binomial](const polynomial::Term<T> & term,
                  const arma::Col<U> & displacement)
          -> Polynomial<std::common_type_t<T, U>> {
        const arma::uword dim = term.dim();
        const auto & exponent = term.exponents;
        const arma::uvec grid = arma::conv_to<arma::uvec>::from(exponent + 1);
        const auto iterations = space::auto_iteration_over_dims(grid);
        auto result = Polynomial<std::common_type_t<T, U>>(dim);
        for (arma::uword i = 0; i < iterations.n_cols; i++) {
          const lvec displacements_poly = arma::conv_to<lvec>::from(
              iterations.col(i));
          const lvec new_exponents = exponent - displacements_poly;
          const math::polynomial::Term<double> local_term(1.0,
                                                          displacements_poly);
          double binomial_coef = 1;
          for (arma::uword j = 0; j < dim; j++) {
            binomial_coef *= binomial(exponent(j), displacements_poly(j));
          }
          result = result + math::polynomial::Term<double>(
              term.coef * binomial_coef * local_term.at(displacement),
              new_exponents);
        }
        return result;
      };
  for (arma::uword i = 0; i < this->coefs.n_elem; i++) {
    result = result + term_displace(this->term(i), displacement);
  }
  return result;
}
// Substitute x_i -> scaling_i * x_i by scaling every term.
Polynomial<T> scale(const arma::vec & scaling) const {
auto result = Polynomial<T>(this->term(0).scale(scaling));
for (arma::uword i = 1; i < this->coefs.n_elem; i++) {
result = result + this->term(i).scale(scaling);
}
return result;
}
// Integer power via repeated multiplication; power == 0 yields the constant
// polynomial 1 over the same variables.
Polynomial<T> pow(const arma::uword power) const {
if (power == 0) {
return Polynomial<T>(this->dim(), 1.0);
}
Polynomial<T> result = *this;
for (arma::uword i = 1; i < power; i++) {
result = result * *this;
}
return result;
}
// Composition: substitute polynomial_list[i] for variable i, i.e. evaluate
// this polynomial with polynomials as arguments. Fix: the inner product
// loop folds into `result` (result = result * ...), so it is sequential;
// the former `#pragma omp parallel for` on it raced on `result` and has
// been removed.
template<typename U>
Polynomial<std::common_type_t<T, U>>
operator()(const std::vector<Polynomial<U>> & polynomial_list) const {
  const auto dim = this->dim();
  if (this->dim() != polynomial_list.size()) {
    throw Error("Mismatched number between the operator and term");
  }
  // Map one monomial: prod_i polynomial_list[i]^exponents(i), times coef.
  const auto term_operate = [dim](const polynomial::Term<T> & term,
      const std::vector<Polynomial<U>> & polynomial_list)
      -> Polynomial<std::common_type_t<T, U>> {
    auto result = Polynomial<std::common_type_t<T, U>>(
        polynomial_list[0].dim(), 1.0);
    for (arma::uword i = 0; i < dim; i++) {
      result = result * polynomial_list[i].pow(term.exponents(i));
    }
    return result * term.coef;
  };
  auto result = Polynomial<std::common_type_t<T, U>>(polynomial_list[0].dim(),
                                                     0.0);
  for (arma::uword i = 0; i < this->coefs.n_elem; i++) {
    result = result + term_operate(this->term(i), polynomial_list);
  }
  return result;
}
// Polynomial + polynomial: concatenate terms, then clean() merges duplicate
// exponent columns.
template<typename U>
Polynomial<std::common_type_t<T, U>>
operator+(const Polynomial<U> & B) const {
const lmat new_indices = arma::join_rows(this->exponents, B.exponents);
const auto converted_this_coefs =
arma::conv_to<arma::Col<std::common_type_t<T, U>>>::from(this->coefs);
const auto converted_B_coefs =
arma::conv_to<arma::Col<std::common_type_t<T, U>>>::from(B.coefs);
const arma::Col<std::common_type_t<T, U>>
new_coefs =
arma::join_cols(converted_this_coefs, converted_B_coefs);
return Polynomial<std::common_type_t<T, U>>{new_coefs, new_indices}.clean();
}
// Polynomial + scalar: append a constant (all-zero exponent) term.
template<typename U>
Polynomial<std::common_type_t<T, U>> operator+(const U B) const {
const lvec dummy_indices = arma::zeros<lvec>(
this->exponents.n_rows);
const lmat new_indices = arma::join_rows(this->exponents,
dummy_indices);
const arma::Col<std::common_type_t<T, U>> converted_coefs =
arma::conv_to<arma::Col<std::common_type_t<T, U>>>::from(this->coefs);
const arma::Col<std::common_type_t<T, U>> new_coefs = arma::join_cols(
converted_coefs, arma::Col<std::common_type_t<T, U>>{B});
return Polynomial<std::common_type_t<T, U>>{new_coefs, new_indices}.clean();
}
// Polynomial + single term: append that term's column and coefficient.
template<typename U>
Polynomial<std::common_type_t<T, U>>
operator+(const polynomial::Term<U> & B) const {
const lmat new_indices = arma::join_rows(this->exponents, B.exponents);
const auto converted_this_coefs =
arma::conv_to<arma::Col<std::common_type_t<T, U>>>::from(this->coefs);
const auto converted_B_coef = arma::Col<std::common_type_t<T, U>>{B.coef};
const arma::Col<std::common_type_t<T, U>>
new_coefs = arma::join_cols(converted_this_coefs, converted_B_coef);
return Polynomial<std::common_type_t<T, U>>{new_coefs, new_indices}.clean();
}
// Polynomial * single term: add the term's exponents to every column and
// scale all coefficients.
template<typename U>
Polynomial<std::common_type_t<T, U>>
operator*(const polynomial::Term<U> & B) const {
lmat new_indices = this->exponents;
new_indices.each_col() += B.exponents;
const arma::Col<std::common_type_t<T, U>>
new_coefs = this->coefs * B.coef;
return Polynomial<std::common_type_t<T, U>>{new_coefs, new_indices}.clean();
}
// Polynomial * polynomial: distribute over B's terms.
template<typename U>
Polynomial<std::common_type_t<T, U>>
operator*(const Polynomial<U> & B) const {
Polynomial<std::common_type_t<T, U>> result_0 = (*this) * B.term(0);
for (arma::uword i = 1; i < B.coefs.n_elem; i++) {
result_0 = result_0 + (*this) * B.term(i);
}
return result_0.clean();
}
// Polynomial * scalar.
template<typename U>
Polynomial<std::common_type_t<T, U>> operator*(const U B) const {
return Polynomial<std::common_type_t<T, U>>{this->coefs * B,
this->exponents}.clean();
}
template<typename U>
Polynomial<std::common_type_t<T, U>>
operator-(const Polynomial<U> & B) const {
return *this + B * (-1.0);
}
template<typename U>
Polynomial<std::common_type_t<T, U>> operator-(const U B) const {
  // Subtracting a scalar is adding its negation.
  const U negated = static_cast<U>(-B);
  return *this + negated;
}
template<typename U>
Polynomial<std::common_type_t<T, U>> operator/(const U B) const {
  // Division by a scalar is multiplication by its reciprocal.
  return (*this) * (1.0 / B);
}
template<typename U>
Polynomial<std::common_type_t<T, U>>
operator/(const polynomial::Term<U> & B) const {
  // Dividing by a monomial: subtract its exponents, divide the coefficients.
  // Fix: the parameter was Term<T>, so the template parameter U was
  // non-deducible and this overload could never be selected; Term<U> keeps
  // the same behavior for U == T and makes mixed-type division callable.
  lmat new_indices = this->exponents;
  new_indices.each_col() -= B.exponents;
  const arma::Col<std::common_type_t<T, U>> new_coefs = this->coefs / B.coef;
  return Polynomial<std::common_type_t<T, U>>{new_coefs, new_indices}.clean();
}
// Merge terms that share an exponent vector (their coefficients are summed)
// and return them in a canonical order.  Each term is keyed by flattening
// its shifted exponent column onto a dense integer grid that spans the
// exponent range, then distinct keys are processed in ascending order.
Polynomial<T> sort() const {
// per-variable exponent extent across all terms
const lvec maximum_exponents = arma::max(this->exponents, 1);
const lvec minimum_exponents = arma::min(this->exponents, 1);
const arma::uvec grid =
arma::conv_to<arma::uvec>::from(maximum_exponents - minimum_exponents)
+ 1;
const auto table = math::space::grids_to_table(grid);
// shift exponents so every entry is >= 0 before flattening to a grid index
lmat indices = this->exponents;
indices.each_col() -= minimum_exponents;
const arma::umat converted_indices = arma::conv_to<arma::umat>::from(
indices);
arma::uvec key(converted_indices.n_cols);
// keys are independent per term, so this parallelizes trivially
#pragma omp parallel for
for (arma::uword i = 0; i < converted_indices.n_cols; i++) {
const arma::uvec index = converted_indices.col(i);
key(i) = math::space::indices_to_index(index, table);
}
// one output term per distinct key
const arma::uvec unique_elements = arma::unique(key);
arma::Col<T> result_coefs(unique_elements.n_elem);
lmat result_exponents(this->dim(), unique_elements.n_elem);
for (arma::uword i = 0; i < unique_elements.n_elem; i++) {
// all input terms carrying the same exponent vector
const arma::uvec
identical_terms_indices = arma::find(key == unique_elements(i));
const lvec corresponding_exponent =
this->exponents.col(identical_terms_indices(0));
const arma::Col<T> corresponding_coef =
this->coefs.rows(identical_terms_indices);
result_coefs(i) = arma::sum(corresponding_coef);
result_exponents.col(i) = corresponding_exponent;
}
return Polynomial<T>(result_coefs, result_exponents);
}
Polynomial<T> clean() const {
  // Canonicalize: merge duplicate exponent vectors, then drop exact-zero
  // coefficients.  An all-zero result collapses to the zero polynomial.
  const Polynomial<T> merged = this->sort();
  const arma::uvec kept = arma::find(merged.coefs);
  if (kept.n_elem == 0) {
    return Polynomial<T>(merged.dim());
  }
  return Polynomial<T>(merged.coefs.rows(kept), merged.exponents.cols(kept));
}
// Render the polynomial as human-readable text, one signed term per entry,
// e.g. " 1.5x^2y^0 + 0.25x^0y^1 ".  `precision` controls the number of
// printed digits; `width >= 1` switches from fmt::format to the project's
// fixed-width `format` helper.  Complex polynomials print as a
// "(real, imag)" pair of the two rendered parts.
std::string to_string(const int precision = 3,
const int width = -1) const {
// renders a real-valued polynomial; invoked once (or twice for complex)
const auto printer = [](const Polynomial<double> term,
const int precision,
const int width) {
const std::vector<std::string> variables =
util::variable_names(term.dim());
std::string result = " ";
// first coefficient printed with its own sign, no leading +/-
if (width <= 0) {
result += fmt::format("{:.{}}", term.coefs(0), precision);
} else {
result += format(term.coefs(0), precision, width);
}
// exponents of the first term, one "name^power" per variable
for (arma::uword j = 0; j < term.exponents.n_rows; j++) {
result =
result + variables[j] + "^" + std::to_string(term.exponents(j, 0)) +
" ";
}
// remaining terms carry an explicit "+ " / "- " separator
for (arma::uword i = 1; i < term.exponents.n_cols; i++) {
if (term.coefs(i) < 0) {
result += "- ";
} else {
result += "+ ";
}
// NOTE(review): the width<=0 branch prints abs(coef) but the width>0
// branch passes the signed value to format() -- confirm format()
// strips the sign itself.
if (width <= 0) {
result += fmt::format("{:.{}}", std::abs(term.coefs(i)), precision);
} else {
result += format(term.coefs(i), precision, width);
}
for (arma::uword j = 0; j < term.exponents.n_rows; j++) {
result =
result + variables[j] + "^" +
std::to_string(term.exponents(j, i)) + " ";
}
}
return result;
};
if constexpr(std::is_same<T, cx_double>::value) {
// complex case: print real and imaginary parts side by side
return " (" + printer(this->real(), precision, width)
+ "," + printer(this->imag(), precision, width) + ") ";
} else {
return printer(*this, precision, width);
}
}
// Defaulted copy assignment: member-wise copy of coefficients and exponents.
Polynomial<T>& operator=(const Polynomial<T> &) = default;
};
// Build one polynomial (a linear form) per row of transform_matrix; each
// polynomial's coefficients are that row and its exponents are the identity
// pattern (degree-1 in each variable).
// Fix: the original declared `std::vector<Polynomial<T>> result[n_cols]`
// -- a runtime-sized C array of vectors (not legal C++) -- and then
// assigned Polynomial objects into vector slots and returned the array as a
// vector.  A single vector sized up front is what was intended.
// NOTE(review): the loop bound is n_cols while the accessor is row(i) --
// confirm transform_matrix is expected to be square.
template<typename T>
std::vector<Polynomial<T>> transform(const arma::Mat<T> & transform_matrix) {
  std::vector<Polynomial<T>> result(transform_matrix.n_cols);
  // each iteration writes a distinct element, so the loop is race-free
  #pragma omp parallel for
  for (arma::uword i = 0; i < transform_matrix.n_cols; i++) {
    result[i] = Polynomial<T>(transform_matrix.row(i).st(),
                              arma::eye<lmat>(arma::size(transform_matrix)));
  }
  return result;
}
}
#endif //MATH_POLYNOMIAL_H
|
kvstore_local.h | /**
* Copyright (c) 2015 by Contributors
* @file kvstore_local.h
* @brief local implementation
*/
#ifndef MXNET_KVSTORE_KVSTORE_LOCAL_H_
#define MXNET_KVSTORE_KVSTORE_LOCAL_H_
#include <mxnet/kvstore.h>
#include <unordered_map>
#include <bitset>
#include <vector>
#include <utility>
#include <algorithm>
namespace mxnet {
namespace kvstore {
/**
* \brief store data in local machine
*/
/**
 * \brief Store data in the local machine.  Every key maps to one array kept
 * in (optionally pinned) CPU memory; values pushed for a key are summed on
 * the CPU before the optional updater runs.
 */
class KVStoreLocal : public KVStore {
 public:
  KVStoreLocal() {
    // pinned (page-locked) host memory enables asynchronous GPU<->CPU copies
    pinned_ctx_ = (MXNET_USE_CUDA != 0) ?
                  Context::CPUPinned(0) : Context::CPU();
    // tuning knobs for the CPU reduction; see ReduceSumCPU
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
  }
  /** \brief copy each initial value into the store; re-init of a key aborts */
  void Init(const std::vector<int>& keys,
            const std::vector<NDArray>& values) override {
    for (size_t i = 0; i < keys.size(); ++i) {
      CHECK(local_.find(keys[i]) == local_.end())
          << "duplicate init of key " << keys[i];
      local_[keys[i]] = values[i].Copy(pinned_ctx_);
    }
  }
  /** \brief sum all values pushed per key, then apply the updater (if set) */
  void Push(const std::vector<int>& keys,
            const std::vector<NDArray>& values,
            int priority) override {
    std::vector<int> uniq_keys;
    std::vector<std::vector<NDArray> > grouped_vals;
    GroupKVPairs(keys, values, &uniq_keys, &grouped_vals);
    for (size_t i = 0; i < uniq_keys.size(); ++i) {
      int key = uniq_keys[i];
      const NDArray& merged = MergePushValue(key, grouped_vals[i], priority);
      if (updater_ != nullptr) {
        auto it = local_.find(key);
        CHECK(it != local_.end()) << "key " << key << " has not been inited";
        updater_(key, merged, &(it->second));
      }
    }
  }
  /** \brief copy the stored (or raw merged, when no updater) value per key */
  void Pull(const std::vector<int>& keys,
            const std::vector<NDArray*>& values,
            int priority) override {
    std::vector<int> uniq_keys;
    std::vector<std::vector<NDArray*> > grouped_vals;
    GroupKVPairs(keys, values, &uniq_keys, &grouped_vals);
    for (size_t i = 0; i < uniq_keys.size(); ++i) {
      int key = uniq_keys[i];
      auto it = merge_buf_.find(key);
      if (updater_ != nullptr || it == merge_buf_.end()) {
        // updater mode (or nothing pushed yet): authoritative copy is local_
        auto it = local_.find(key);
        CHECK(it != local_.end()) << "key " << key << " has not been inited";
        ScatterPullValue(
            key, it->second, grouped_vals[i], priority);
      } else {
        // no updater: hand back the raw merged push value
        ScatterPullValue(
            key, it->second.merged, grouped_vals[i], priority);
      }
    }
  }

 protected:
  /// \brief temporal space for pushing and pulling
  struct BufferEntry {
    // Context of merged
    Context ctx;
    // the merged value
    NDArray merged;
    // the merged value on device
    NDArray merged_device;
    /// \brief the cpu buffer for gpu data
    std::vector<NDArray> copy_buf;
    // allocate copy buffer, if it has not been allocated
    inline NDArray *AllocCopyBuf(size_t index, Context ctx, const TShape& shape) {
      if (index >= copy_buf.size()) copy_buf.resize(index + 1);
      if (copy_buf[index].is_none()) {
        copy_buf[index] = NDArray(shape, ctx);
      }
      // fix: this return was corrupted into the HTML entity "©_buf";
      // it must return the address of the buffer slot
      return &copy_buf[index];
    }
  };
  /**
   * \brief group values on keys: uniq_keys receives each distinct key in
   * ascending order, grouped_vals the matching values in stable input order
   */
  template <typename V>
  void GroupKVPairs(const std::vector<int>& keys,
                    const std::vector<V>& values,
                    std::vector<int>* uniq_keys,
                    std::vector<std::vector<V> >* grouped_vals) {
    CHECK_EQ(keys.size(), values.size());
    // fix: guard the empty case, idx[0] below would be out of bounds
    if (keys.empty()) return;
    // TODO(mli) check if already sorted as an optimization
    using Idx = std::pair<int, int>;
    std::vector<Idx> idx(keys.size());
    for (size_t i = 0; i < keys.size(); ++i) {
      idx[i].first = keys[i]; idx[i].second = i;
    }
    // stable grouping: sort (key, original position) pairs by key
    std::sort(idx.begin(), idx.end(), [](const Idx& a, const Idx& b) {
      return a.first < b.first;
    });
    int pre_key = idx[0].first - 1;  // guaranteed != first key
    for (auto i : idx) {
      if (i.first != pre_key) {
        uniq_keys->push_back(i.first);
        grouped_vals->push_back({values[i.second]});
        pre_key = i.first;
      } else {
        grouped_vals->back().push_back(values[i.second]);
      }
    }
  }
  /*!
   * \brief return the aggregated push value; GPU inputs are staged through
   * pinned-CPU copy buffers, then everything is summed into buf.merged
   */
  virtual const NDArray& MergePushValue(
      int key, const std::vector<NDArray>& val, int priority) {
    auto& buf = merge_buf_[key];
    // copy buffer
    std::vector<Engine::VarHandle> const_vars(val.size() - 1);
    std::vector<NDArray> reduce(val.size());
    if (buf.merged.is_none()) {
      // lazily allocate the accumulation buffer on first push
      buf.ctx = Context::CPUPinned(val[0].ctx().dev_id);
      if (MXNET_USE_CUDA == 0) buf.ctx = Context::CPU();
      buf.merged = NDArray(val[0].shape(), buf.ctx);
    }
    CopyFromTo(val[0], &(buf.merged), priority);
    reduce[0] = buf.merged;
    for (size_t i = 1; i < val.size(); ++i) {
      const NDArray& v = val[i];
      Context ctx = v.ctx();
      if (ctx.dev_mask() == cpu::kDevMask) {
        reduce[i] = val[i];
      } else {
        // device array: stage it through a pinned CPU buffer first
        NDArray *copy_buf = buf.AllocCopyBuf(
            i, Context::CPUPinned(ctx.dev_id), val[0].shape());
        CopyFromTo(val[i], copy_buf, priority);
        reduce[i] = *copy_buf;
      }
      const_vars[i - 1] = reduce[i].var();
    }
    // sum everything into reduce[0] (aliases buf.merged) on the engine
    Engine::Get()->PushSync([reduce, this](RunContext rctx) {
        ReduceSumCPU(reduce);
      }, Context::CPU(), const_vars, {reduce[0].var()},
      FnProperty::kCPUPrioritized, priority);
    return buf.merged;
  }
  /** \brief copy src into every requested pull destination */
  virtual void ScatterPullValue(
      int key,
      const NDArray& src,
      const std::vector<NDArray*>& vals,
      int priority) {
    for (auto* vptr : vals) {
      CopyFromTo(src, vptr, priority);
    }
  }
  /// \brief buffer for merging push value
  std::unordered_map<int, BufferEntry> merge_buf_;
  // pinned context
  Context pinned_ctx_;
  // arrays at least this large are reduced with multiple threads
  size_t bigarray_bound_;

 private:
  /** \brief sum dptr[1..n-1][offset:offset+size] into dptr[0]; fan-in of
   * 2..4 is unrolled, larger fan-in falls back to the loop */
  inline static void ReduceSumCPU(const std::vector<real_t*> &dptr,
                                  size_t offset, index_t size) {
    using namespace mshadow;  // NOLINT(*)
    Tensor<cpu, 1> in_0(dptr[0] + offset, Shape1(size));
    switch (dptr.size()) {
      case 2: {
        Tensor<cpu, 1> in_1(dptr[1] + offset, Shape1(size));
        in_0 += in_1;
        break;
      }
      case 3: {
        Tensor<cpu, 1> in_1(dptr[1] + offset, Shape1(size));
        Tensor<cpu, 1> in_2(dptr[2] + offset, Shape1(size));
        in_0 += in_1 + in_2;
        break;
      }
      case 4: {
        Tensor<cpu, 1> in_1(dptr[1] + offset, Shape1(size));
        Tensor<cpu, 1> in_2(dptr[2] + offset, Shape1(size));
        Tensor<cpu, 1> in_3(dptr[3] + offset, Shape1(size));
        in_0 += in_1 + in_2 + in_3;
        break;
      }
      default: {
        for (size_t i = 1; i < dptr.size(); ++i) {
          Tensor<cpu, 1> in_k(dptr[i] + offset, Shape1(size));
          in_0 += in_k;
        }
      }
    }
  }
  // reduce sum into val[0]
  // this is performance critical
  inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
    // chunk size: small enough to balance, large enough to amortize overhead
    const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
    // get the raw pointers out
    std::vector<real_t*> dptr(in_data.size());
    for (size_t i = 0; i < in_data.size(); ++i) {
      TBlob data = in_data[i].data();
      CHECK(data.CheckContiguous());
      dptr[i] = data.FlatTo2D<cpu, real_t>().dptr_;
    }
    size_t total = in_data[0].shape().Size();
    long ntask = (total + step - 1) / step;  // NOLINT(*)
    if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
      ReduceSumCPU(dptr, 0, total);
    } else {
      #pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
      for (long j = 0; j < ntask; ++j) {  // NOLINT(*)
        size_t k = static_cast<size_t>(j);
        size_t begin = std::min(k * step, total);
        size_t end = std::min((k + 1) * step, total);
        if (j == ntask - 1) CHECK_EQ(end, total);
        ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
      }
    }
  }
  /// \brief buffer for storing local values
  std::unordered_map<int, NDArray> local_;
  // number of threads to do reduction
  int nthread_reduction_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_KVSTORE_LOCAL_H_
|
nr_numint.c | /*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "cint.h"
#include "gto/grid_ao_drv.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
#include <assert.h>
#define BOXSIZE 56
/* Flag which BOXSIZE-wide blocks of AO indices are negligible.
 * empty[b] ends up 1 iff every shell whose AOs overlap box b is marked
 * zero in non0table; the return value is 1 when at least one box is empty.
 * A NULL table or slice disables screening (returns 0). */
int VXCao_empty_blocks(char *empty, unsigned char *non0table, int *shls_slice,
                       int *ao_loc)
{
        if (!non0table || !shls_slice || !ao_loc) {
                return 0;
        }
        const int shell_start = shls_slice[0];
        const int shell_stop = shls_slice[1];
        int box = 0;
        int next_boundary = BOXSIZE;
        int found_empty = 0;
        int ish;
        empty[box] = 1;
        for (ish = shell_start; ish < shell_stop; ish++) {
                /* the shell contributes to the box currently open */
                empty[box] &= !non0table[ish];
                if (ao_loc[ish] == next_boundary) {
                        /* shell starts exactly on a boundary: close the box
                         * and open a fresh one */
                        found_empty |= empty[box];
                        box++;
                        next_boundary += BOXSIZE;
                        empty[box] = 1;
                } else if (ao_loc[ish] > next_boundary) {
                        /* shell straddles the boundary: it belongs to the
                         * new box as well */
                        found_empty |= empty[box];
                        box++;
                        next_boundary += BOXSIZE;
                        empty[box] = !non0table[ish];
                }
        }
        return found_empty;
}
/* vm[:nocc,:bgrids] = ao[:nao,:bgrids]^T . dm[:nao,:nocc] (column-major
 * dgemm), skipping BOXSIZE-wide AO blocks flagged empty by
 * VXCao_empty_blocks.  The first contributing block overwrites vm
 * (beta = 0); later blocks accumulate (beta = 1). */
static void dot_ao_dm(double *vm, double *ao, double *dm,
int nao, int nocc, int ngrids, int bgrids,
unsigned char *non0table, int *shls_slice, int *ao_loc)
{
int nbox = (nao+BOXSIZE-1) / BOXSIZE;
char empty[nbox];
int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);
const char TRANS_T = 'T';
const char TRANS_N = 'N';
const double D1 = 1;
double beta = 0;
if (has0) {
int box_id, bas_id, b0, blen, i, j;
for (box_id = 0; box_id < nbox; box_id++) {
if (!empty[box_id]) {
b0 = box_id * BOXSIZE;
blen = MIN(nao-b0, BOXSIZE);
dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &blen,
&D1, ao+b0*ngrids, &ngrids, dm+b0*nocc, &nocc,
&beta, vm, &ngrids);
/* after the first dgemm, accumulate into vm */
beta = 1.0;
}
}
if (beta == 0) { // all empty
/* no block wrote vm: clear the output explicitly */
for (i = 0; i < nocc; i++) {
for (j = 0; j < bgrids; j++) {
vm[i*ngrids+j] = 0;
}
}
}
} else {
/* no screening possible: one dense dgemm over all AOs */
dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &nao,
&D1, ao, &ngrids, dm, &nocc, &beta, vm, &ngrids);
}
}
/* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc] */
void VXCdot_ao_dm(double *vm, double *ao, double *dm,
int nao, int nocc, int ngrids, int nbas,
unsigned char *non0table, int *shls_slice, int *ao_loc)
{
const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
#pragma omp parallel default(none) \
shared(vm, ao, dm, nao, nocc, ngrids, nbas, \
non0table, shls_slice, ao_loc)
{
int ip, ib;
#pragma omp for nowait schedule(static)
for (ib = 0; ib < nblk; ib++) {
ip = ib * BLKSIZE;
dot_ao_dm(vm+ip, ao+ip, dm,
nao, nocc, ngrids, MIN(ngrids-ip, BLKSIZE),
non0table+ib*nbas, shls_slice, ao_loc);
}
}
}
/* vv[n,m] = ao1[n,ngrids] * ao2[m,ngrids] */
/* vv[i,j] += sum_g ao1[i,g] * ao2[j,g] over one grid block, skipping
 * AO blocks flagged empty.  With hermi != 0 only jb <= ib block pairs are
 * computed; the caller symmetrizes the triangle afterwards.  Results are
 * always accumulated (beta = 1), so vv must be initialized by the caller. */
static void dot_ao_ao(double *vv, double *ao1, double *ao2,
int nao, int ngrids, int bgrids, int hermi,
unsigned char *non0table, int *shls_slice, int *ao_loc)
{
int nbox = (nao+BOXSIZE-1) / BOXSIZE;
char empty[nbox];
int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);
const char TRANS_T = 'T';
const char TRANS_N = 'N';
const double D1 = 1;
if (has0) {
int ib, jb, b0i, b0j, leni, lenj;
int j1 = nbox;
for (ib = 0; ib < nbox; ib++) {
if (!empty[ib]) {
b0i = ib * BOXSIZE;
leni = MIN(nao-b0i, BOXSIZE);
if (hermi) {
/* hermitian case: restrict to the lower triangle */
j1 = ib + 1;
}
for (jb = 0; jb < j1; jb++) {
if (!empty[jb]) {
b0j = jb * BOXSIZE;
lenj = MIN(nao-b0j, BOXSIZE);
dgemm_(&TRANS_T, &TRANS_N, &lenj, &leni, &bgrids, &D1,
ao2+b0j*ngrids, &ngrids, ao1+b0i*ngrids, &ngrids,
&D1, vv+b0i*nao+b0j, &nao);
} }
} }
} else {
/* no screening possible: one dense accumulation over all AOs */
dgemm_(&TRANS_T, &TRANS_N, &nao, &nao, &bgrids,
&D1, ao2, &ngrids, ao1, &ngrids, &D1, vv, &nao);
}
}
/* vv[nao,nao] = ao1[i,nao] * ao2[i,nao] */
void VXCdot_ao_ao(double *vv, double *ao1, double *ao2,
int nao, int ngrids, int nbas, int hermi,
unsigned char *non0table, int *shls_slice, int *ao_loc)
{
const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
memset(vv, 0, sizeof(double) * nao * nao);
#pragma omp parallel default(none) \
shared(vv, ao1, ao2, nao, ngrids, nbas, hermi, \
non0table, shls_slice, ao_loc)
{
int ip, ib;
double *v_priv = calloc(nao*nao, sizeof(double));
#pragma omp for nowait schedule(static)
for (ib = 0; ib < nblk; ib++) {
ip = ib * BLKSIZE;
dot_ao_ao(v_priv, ao1+ip, ao2+ip,
nao, ngrids, MIN(ngrids-ip, BLKSIZE), hermi,
non0table+ib*nbas, shls_slice, ao_loc);
}
#pragma omp critical
{
for (ip = 0; ip < nao*nao; ip++) {
vv[ip] += v_priv[ip];
}
}
free(v_priv);
}
if (hermi != 0) {
NPdsymm_triu(nao, vv, hermi);
}
}
|
Example_affinity.3.c | /*
* @@name: affinity.3c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_4.0
*/
/* Fix: in C, "void work()" declares a function with an unspecified
 * parameter list; "(void)" is the proper prototype. */
void work(void);
int main(void)
{
   /* four threads, each bound close to the primary thread's place */
#pragma omp parallel proc_bind(close) num_threads(4)
   {
      work();
   }
   return 0;
}
|
thbasic.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <limits.h>
#include "thnets.h"
#ifndef USEBLAS
#include "sgemm.h"
#endif
#ifdef ACCELERATE
#include <Accelerate/Accelerate.h>
#endif
#define THAtomicIncrement(a) __sync_fetch_and_add(a, 1);
#define THAtomicDecrement(a) __sync_fetch_and_add(a, -1);
/* Allocate a refcounted float storage holding `size` elements.
 * Aborts via THError on allocation failure.
 * Fixes: the malloc of the header itself was unchecked (s->data would be a
 * NULL dereference on OOM), and the error message used %u for a
 * size_t-valued expression, which is undefined behavior in printf. */
THFloatStorage *THFloatStorage_new(long size)
{
	THFloatStorage *s = malloc(sizeof(*s));
	if(!s)
		THError("Out of memory trying to allocate %ld bytes", (long)sizeof(*s));
	s->data = malloc(sizeof(*s->data) * size);
	if(!s->data)
		THError("Out of memory trying to allocate %ld bytes", (long)(sizeof(*s->data) * size));
	s->nref = 1;		/* caller owns the first reference */
	s->mustfree = 1;	/* data was allocated here: free it with the storage */
	return s;
}
/* Wrap a caller-owned buffer in a refcounted storage (the buffer is NOT
 * freed with the storage).  Fix: the malloc was unchecked. */
THFloatStorage *THFloatStorage_newwithbuffer(void *buffer)
{
	THFloatStorage *s = malloc(sizeof(*s));
	if(!s)
		THError("Out of memory trying to allocate %ld bytes", (long)sizeof(*s));
	s->data = buffer;
	s->nref = 1;
	s->mustfree = 0;	/* caller keeps ownership of the buffer */
	return s;
}
/* Drop one reference; when the count reaches zero, release the data with
 * the deallocator matching how it was obtained (mustfree: 1 = host malloc,
 * 2 = cudaMalloc, 3 = OpenCL buffer, 0 = caller-owned) and the header.
 * NOTE(review): the decrement macro discards the atomic's return value and
 * the nref == 0 test is a separate non-atomic read, so concurrent frees
 * could race -- confirm storages are only freed from one thread. */
void THFloatStorage_free(THFloatStorage *s)
{
THAtomicDecrement(&s->nref);
if(s->nref == 0)
{
#ifdef CUDNN
if(s->mustfree == 2)
cudaFree(s->data);
else
#endif
#ifdef OPENCL
if(s->mustfree == 3)
clReleaseMemObject((cl_mem)s->data);
else
#endif
if(s->mustfree)
free(s->data);
free(s);
}
}
/* Resize t to an arbitrary dense (row-major) shape.  A size entry of -1
 * marks a tensor that carries no storage; otherwise the backing storage is
 * (re)allocated only when the element count changes. */
void THFloatTensor_resize(THFloatTensor *t, long *size, int nDimension)
{
	int i;
	long stride = 1;	/* accumulates to the total element count */
	char nostorage = 0;
	long nelem = THFloatTensor_nElement(t);
	t->nDimension = nDimension;
	memcpy(t->size, size, nDimension * sizeof(*t->size));
	for(i = nDimension - 1; i >= 0; i--)
	{
		t->stride[i] = stride;
		stride *= t->size[i];
		if(t->size[i] == -1)
			nostorage = 1;	/* placeholder shape: drop the storage */
	}
	if(nelem != THFloatTensor_nElement(t))
	{
		if(nostorage)
		{
			if(t->storage)
			{
				THFloatStorage_free(t->storage);
				t->storage = 0;
			}
		} else if(t->storage)
		{
			/* fix: check realloc before overwriting the pointer, so the
			 * old block is not leaked (and later dereferenced) on failure */
			void *data = realloc(t->storage->data, sizeof(*t->storage->data) * stride);
			if(!data)
				THError("Out of memory trying to allocate %ld bytes", (long)(sizeof(*t->storage->data) * stride));
			t->storage->data = data;
		}
		else t->storage = THFloatStorage_new(stride);
	}
}
/* Resize t to a 4-D tensor (batch, plane, row, col); storage is
 * reallocated only when the element count changes. */
void THFloatTensor_resize4d(THFloatTensor *t, long size0, long size1, long size2, long size3)
{
	long nElement = THFloatTensor_nElement(t);
	long newElement = size0 * size1 * size2 * size3;
	t->nDimension = 4;
	t->size[0] = size0;//batch
	t->size[1] = size1;//plane
	t->size[2] = size2;//row
	t->size[3] = size3;//col
#ifdef USEQSML
	/* QSML layout keeps the plane dimension innermost */
	t->stride[3] = size1;//col
	t->stride[2] = size1 * size3;//row
	t->stride[1] = 1;//plane
	t->stride[0] = size1 * size2 * size3;//batch
#else
	t->stride[3] = 1;//col
	t->stride[2] = size3;//row
	t->stride[1] = size2 * size3;//plane
	t->stride[0] = size1 * size2 * size3;//batch
#endif
	if(nElement != newElement)
	{
		if(t->storage)
		{
			/* fix: check realloc before overwriting the pointer */
			void *data = realloc(t->storage->data, sizeof(*t->storage->data) * newElement);
			if(!data)
				THError("Out of memory trying to allocate %ld bytes", (long)(sizeof(*t->storage->data) * newElement));
			t->storage->data = data;
		}
		else t->storage = THFloatStorage_new(newElement);
	}
}
/* Resize t to a 3-D tensor; storage is reallocated only when the element
 * count changes. */
void THFloatTensor_resize3d(THFloatTensor *t, long size0, long size1, long size2)
{
	long nElement = THFloatTensor_nElement(t);
	long newElement = size0 * size1 * size2;
	t->nDimension = 3;
	t->size[0] = size0;//col
	t->size[1] = size1;//row
	t->size[2] = size2;//plane
#ifdef USEQSML
	/* QSML layout keeps the first dimension innermost */
	t->stride[2] = size2;//col
	t->stride[1] = size1 * size2;//row
	t->stride[0] = 1;//plane
#else
	t->stride[2] = 1;//col
	t->stride[1] = size2;//row
	t->stride[0] = size1 * size2;//plane
#endif
	if(nElement != newElement)
	{
		if(t->storage)
		{
			/* fix: check realloc before overwriting the pointer */
			void *data = realloc(t->storage->data, sizeof(*t->storage->data) * newElement);
			if(!data)
				THError("Out of memory trying to allocate %ld bytes", (long)(sizeof(*t->storage->data) * newElement));
			t->storage->data = data;
		}
		else t->storage = THFloatStorage_new(newElement);
	}
}
/* Resize t to a dense row-major 2-D tensor; storage is reallocated only
 * when the element count changes. */
void THFloatTensor_resize2d(THFloatTensor *t, long size0, long size1)
{
	long nElement = THFloatTensor_nElement(t);
	t->nDimension = 2;
	t->size[0] = size0;
	t->size[1] = size1;
	t->stride[1] = 1;
	t->stride[0] = size1;
	if(nElement != size0 * size1)
	{
		if(t->storage)
		{
			/* fix: check realloc before overwriting the pointer */
			void *data = realloc(t->storage->data, sizeof(*t->storage->data) * size0 * size1);
			if(!data)
				THError("Out of memory trying to allocate %ld bytes", (long)(sizeof(*t->storage->data) * size0 * size1));
			t->storage->data = data;
		}
		else t->storage = THFloatStorage_new(size0 * size1);
	}
}
/* Resize t to a contiguous 1-D tensor; storage is reallocated only when
 * the element count changes. */
void THFloatTensor_resize1d(THFloatTensor *t, long size0)
{
	long nElement = THFloatTensor_nElement(t);
	t->nDimension = 1;
	t->size[0] = size0;
	t->stride[0] = 1;
	if(nElement != size0)
	{
		if(t->storage)
		{
			/* fix: check realloc before overwriting the pointer */
			void *data = realloc(t->storage->data, sizeof(*t->storage->data) * size0);
			if(!data)
				THError("Out of memory trying to allocate %ld bytes", (long)(sizeof(*t->storage->data) * size0));
			t->storage->data = data;
		}
		else t->storage = THFloatStorage_new(size0);
	}
}
/* Print a printf-style error message to stderr, append a newline, and
 * terminate the process with a failure status. */
void THError(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
	fputc('\n', stderr);
	exit(-1);
}
/* Release a tensor header and drop its reference to the backing storage
 * (which is refcounted and only freed on the last release).  NULL is a
 * no-op. */
void THFloatTensor_free(THFloatTensor *t)
{
	if(t == NULL)
		return;
	if(t->storage != NULL)
		THFloatStorage_free(t->storage);
	free(t);
}
/* Turn dst into a view of src restricted to [from, to) along `dimension`.
 * The storage is shared (refcount incremented); no data is copied.
 * NOTE(review): the new storageOffset is computed from the storage base and
 * ignores src->storageOffset -- confirm callers only slice base tensors. */
void THFloatTensor_slice(THFloatTensor *dst, THFloatTensor *src, int dimension, long from, long to)
{
int i;
/* release whatever dst referenced before */
if(dst->storage)
THFloatStorage_free(dst->storage);
dst->nDimension = src->nDimension;
dst->storageOffset = from * src->stride[dimension];
dst->size[dimension] = to - from;
for(i = 0; i < src->nDimension; i++)
{
if(i != dimension)
dst->size[i] = src->size[i];
dst->stride[i] = src->stride[i];
}
dst->storage = src->storage;
THAtomicIncrement(&dst->storage->nref);
}
/* Return a new view of `tensor` with `dimension` removed, fixed at
 * sliceIndex.  The storage is shared (refcounted); no data is copied.
 * Fix: the malloc of the header was unchecked. */
THFloatTensor *THFloatTensor_newSelect(THFloatTensor *tensor, int dimension, long sliceIndex)
{
	int i;
	THFloatTensor *t = malloc(sizeof(*t));
	if(!t)
		THError("Out of memory trying to allocate %ld bytes", (long)sizeof(*t));
#ifdef LOWP
	t->mult = tensor->mult;
	t->sub = tensor->sub;
#endif
	t->nDimension = tensor->nDimension - 1;
	t->storageOffset = tensor->storageOffset + sliceIndex * tensor->stride[dimension];
	for(i = 0; i < dimension; i++)
	{
		t->size[i] = tensor->size[i];
		t->stride[i] = tensor->stride[i];
	}
	/* shift the remaining dimensions down over the removed one */
	for(i = dimension; i < t->nDimension; i++)
	{
		t->size[i] = tensor->size[i+1];
		t->stride[i] = tensor->stride[i+1];
	}
	t->storage = tensor->storage;
	THAtomicIncrement(&t->storage->nref);
	return t;
}
/* Total number of elements: the product of all dimension sizes.
 * A 0-dimensional tensor is empty. */
long THFloatTensor_nElement(THFloatTensor *t)
{
	long count;
	int d;

	if(t->nDimension == 0)
		return 0;
	count = 1;
	for(d = 0; d < t->nDimension; d++)
		count *= t->size[d];
	return count;
}
/* Return 1 when the two tensors have the same dimensionality and every
 * dimension size agrees, 0 otherwise (strides are not compared). */
int THFloatTensor_isSameSizeAs(const THFloatTensor *self, const THFloatTensor* src)
{
	int d;

	if (self->nDimension != src->nDimension)
		return 0;
	for(d = 0; d < self->nDimension; ++d)
		if(self->size[d] != src->size[d])
			return 0;
	return 1;
}
/* Give tdst the shape and strides of tsrc, reallocating its own storage
 * only when the element count changes.  No data is copied. */
void THFloatTensor_resizeAs(THFloatTensor *tdst, THFloatTensor *tsrc)
{
	if(tsrc == tdst)
		return;
	long nelemdst = THFloatTensor_nElement(tdst);
	long nelemsrc = THFloatTensor_nElement(tsrc);
	tdst->nDimension = tsrc->nDimension;
	memcpy(tdst->size, tsrc->size, sizeof(tsrc->size));
	memcpy(tdst->stride, tsrc->stride, sizeof(tsrc->stride));
	if(nelemsrc != nelemdst)
	{
		if(tdst->storage)
		{
			/* fix: check realloc before overwriting the pointer, so the
			 * old block is not leaked on failure */
			void *data = realloc(tdst->storage->data, sizeof(*tdst->storage->data) * nelemsrc);
			if(!data)
				THError("Out of memory trying to allocate %ld bytes", (long)(sizeof(*tdst->storage->data) * nelemsrc));
			tdst->storage->data = data;
		}
		else tdst->storage = THFloatStorage_new(nelemsrc);
	}
}
/* Make tdst an alias of tsrc: the whole header is copied, the old storage
 * reference of tdst is released and tsrc's storage (now shared) gains a
 * reference.  No element data is copied. */
void THFloatTensor_set(THFloatTensor *tdst, THFloatTensor *tsrc)
{
if(tsrc == tdst)
return;
if(tdst->storage)
THFloatStorage_free(tdst->storage);
*tdst = *tsrc;
/* after the struct copy tdst->storage == tsrc->storage */
if(tdst->storage)
THAtomicIncrement(&tsrc->storage->nref);
}
/* Pointer to the tensor's first element (storage base plus offset), or
 * NULL when the tensor has no backing data. */
float *THFloatTensor_data(THFloatTensor *tensor)
{
	if(!tensor || !tensor->storage || !tensor->storage->data)
		return 0;
	return tensor->storage->data + tensor->storageOffset;
}
/* Allocate a zero-initialized tensor header: no dimensions, no storage.
 * Fix: calloc was unchecked although every caller dereferences the result;
 * abort through THError like the storage constructors do. */
THFloatTensor *THFloatTensor_new()
{
	THFloatTensor *t = calloc(1, sizeof(THFloatTensor));
	if(!t)
		THError("Out of memory trying to allocate %ld bytes", (long)sizeof(THFloatTensor));
	return t;
}
/* Wrap an existing storage as a 3-D tensor view; a stride of -1 selects
 * the dense row-major stride for that dimension.  The storage gains one
 * reference. */
THFloatTensor *THFloatTensor_newWithStorage3d(THFloatStorage *storage, long storageOffset, long size0, long stride0, long size1, long stride1, long size2, long stride2)
{
	THFloatTensor *t = THFloatTensor_new();

	t->nDimension = 3;
	t->size[0] = size0;
	t->size[1] = size1;
	t->size[2] = size2;
	if(stride0 == -1)
		stride0 = size1 * size2;
	if(stride1 == -1)
		stride1 = size2;
	if(stride2 == -1)
		stride2 = 1;
	t->stride[0] = stride0;
	t->stride[1] = stride1;
	t->stride[2] = stride2;
	t->storageOffset = storageOffset;
	t->storage = storage;
	THAtomicIncrement(&t->storage->nref);
	return t;
}
/* Wrap an existing storage as a 2-D tensor view; a stride of -1 selects
 * the dense row-major stride.  The storage gains one reference. */
THFloatTensor *THFloatTensor_newWithStorage2d(THFloatStorage *storage, long storageOffset, long size0, long stride0, long size1, long stride1)
{
	THFloatTensor *t = THFloatTensor_new();

	t->nDimension = 2;
	t->size[0] = size0;
	t->size[1] = size1;
	if(stride0 == -1)
		stride0 = size1;
	if(stride1 == -1)
		stride1 = 1;
	t->stride[0] = stride0;
	t->stride[1] = stride1;
	t->storageOffset = storageOffset;
	t->storage = storage;
	THAtomicIncrement(&t->storage->nref);
	return t;
}
/* Wrap an existing storage as a 1-D tensor view; a stride of -1 means
 * contiguous.  The storage gains one reference. */
THFloatTensor *THFloatTensor_newWithStorage1d(THFloatStorage *storage, long storageOffset, long size0, long stride0)
{
	THFloatTensor *t = THFloatTensor_new();

	t->nDimension = 1;
	t->size[0] = size0;
	if(stride0 == -1)
		stride0 = 1;
	t->stride[0] = stride0;
	t->storageOffset = storageOffset;
	t->storage = storage;
	THAtomicIncrement(&t->storage->nref);
	return t;
}
/* Shallow clone: a new header sharing `tensor`'s storage (the refcount is
 * bumped inside THFloatTensor_set). */
THFloatTensor *THFloatTensor_newWithTensor(THFloatTensor *tensor)
{
	THFloatTensor *clone = THFloatTensor_new();
	THFloatTensor_set(clone, tensor);
	return clone;
}
/* Overwrite the tensor's elements with zero.  Clears from the storage base
 * (the storageOffset is not applied), matching the original behavior. */
void THFloatTensor_zero(THFloatTensor *t)
{
	size_t nbytes = THFloatTensor_nElement(t) * sizeof(*t->storage->data);
	memset(t->storage->data, 0, nbytes);
}
/* Set every element to `value` via the vectorized fill helper.  Fills from
 * the storage base (the storageOffset is not applied). */
void THFloatTensor_fill(THFloatTensor *t, float value)
{
	long count = THFloatTensor_nElement(t);
	THFloatVector_fill(t->storage->data, value, count);
}
/* Flat element-wise copy of tsrc into tdst (both taken through their data
 * pointers, so offsets are honored but strides are not -- use
 * THFloatTensor_safecopy for non-contiguous tensors). */
void THFloatTensor_copy(THFloatTensor *tdst, THFloatTensor *tsrc)
{
	float *from = THFloatTensor_data(tsrc);
	float *to = THFloatTensor_data(tdst);
	memcpy(to, from, sizeof(*to) * THFloatTensor_nElement(tsrc));
}
/* Stride-aware copy of tsrc into tdst for up to 4 dimensions.  For 2-D and
 * higher, tdst's strides are REWRITTEN to the dense row-major layout before
 * copying, so the destination always ends up contiguous while the source
 * may be an arbitrary view (e.g. a transpose). */
void THFloatTensor_safecopy(THFloatTensor *tdst, THFloatTensor *tsrc)
{
float *src, *dst;
long i0, i1, i2, i3;
src = THFloatTensor_data(tsrc);
dst = THFloatTensor_data(tdst);
if(tsrc->nDimension == 1)
{
for(i0 = 0; i0 < tsrc->size[0]; i0++)
dst[tdst->stride[0] * i0] = src[tsrc->stride[0] * i0];
return;
}
if(tsrc->nDimension == 2)
{
/* force a dense destination layout */
tdst->stride[0] = tdst->size[1];
tdst->stride[1] = 1;
for(i0 = 0; i0 < tsrc->size[0]; i0++)
for(i1 = 0; i1 < tsrc->size[1]; i1++)
dst[tdst->stride[0] * i0 + tdst->stride[1] * i1] = src[tsrc->stride[0] * i0 + tsrc->stride[1] * i1];
return;
}
if(tsrc->nDimension == 3)
{
/* force a dense destination layout */
tdst->stride[0] = tdst->size[1] * tdst->size[2];
tdst->stride[1] = tdst->size[2];
tdst->stride[2] = 1;
for(i0 = 0; i0 < tsrc->size[0]; i0++)
for(i1 = 0; i1 < tsrc->size[1]; i1++)
for(i2 = 0; i2 < tsrc->size[2]; i2++)
dst[tdst->stride[0] * i0 + tdst->stride[1] * i1 + tdst->stride[2] * i2] = src[tsrc->stride[0] * i0 + tsrc->stride[1] * i1 + tsrc->stride[2] * i2];
return;
}
/* 4-D fallback (higher ranks are not handled) */
tdst->stride[0] = tdst->size[1] * tdst->size[2] * tdst->size[3];
tdst->stride[1] = tdst->size[2] * tdst->size[3];
tdst->stride[2] = tdst->size[3];
tdst->stride[3] = 1;
for(i0 = 0; i0 < tsrc->size[0]; i0++)
for(i1 = 0; i1 < tsrc->size[1]; i1++)
for(i2 = 0; i2 < tsrc->size[2]; i2++)
for(i3 = 0; i3 < tsrc->size[3]; i3++)
dst[tdst->stride[0] * i0 + tdst->stride[1] * i1 + tdst->stride[2] * i2 + tdst->stride[3] * i3] =
src[tsrc->stride[0] * i0 + tsrc->stride[1] * i1 + tsrc->stride[2] * i2 + tsrc->stride[3] * i3];
}
/* Make tdst a transposed view of tsrc by swapping the size/stride entries
 * of the two dimensions; no data moves.  Passing tsrc == NULL transposes
 * tdst in place. */
void THFloatTensor_transpose(THFloatTensor *tdst, THFloatTensor *tsrc, int dimension1, int dimension2)
{
	long swapped;

	if(tsrc == NULL)
		tsrc = tdst;
	THFloatTensor_set(tdst, tsrc);
	if(dimension1 == dimension2)
		return;
	swapped = tdst->stride[dimension1];
	tdst->stride[dimension1] = tdst->stride[dimension2];
	tdst->stride[dimension2] = swapped;
	swapped = tdst->size[dimension1];
	tdst->size[dimension1] = tdst->size[dimension2];
	tdst->size[dimension2] = swapped;
}
/* Return a new transposed view of `tensor` (shared storage, swapped
 * size/stride metadata). */
THFloatTensor *THFloatTensor_newTranspose(THFloatTensor *tensor, int dimension1_, int dimension2_)
{
	THFloatTensor *view = THFloatTensor_newWithTensor(tensor);
	THFloatTensor_transpose(view, NULL, dimension1_, dimension2_);
	return view;
}
/* Return a new view of t with every size-1 dimension removed.  Storage is
 * shared; only the size/stride bookkeeping is compacted. */
THFloatTensor *THFloatTensor_squeeze(THFloatTensor *t)
{
int ndim = 0, i;
THFloatTensor *t2 = THFloatTensor_newWithTensor(t);
for(i = 0; i < t->nDimension; i++)
if(t->size[i] != 1)
{
/* compact kept dimensions toward the front; when i == ndim the
 * entries copied by newWithTensor are already in place */
if(i != ndim)
{
t2->size[ndim] = t->size[i];
t2->stride[ndim] = t->stride[i];
}
ndim++;
}
t2->nDimension = ndim;
return t2;
}
/* Fast approximation of exp(-x) for x >= 0.  A degree-4 polynomial
 * approximates exp(x/8); squaring it three times yields ~exp(x) and the
 * reciprocal gives exp(-x).  For x >= 13 the result is clamped to 0. */
double THExpMinusApprox(double x)
{
#if EXACT_EXPONENTIAL
	return exp(-x);
#else
	static const double c0 = 1.0;
	static const double c1 = 0.125;
	static const double c2 = 0.0078125;
	static const double c3 = 0.00032552083;
	static const double c4 = 1.0172526e-5;

	if (x >= 13.0)
		return 0;	/* exp(-13) ~ 2e-6, below the approximation's accuracy */
	/* assert(x >= 0); Horner evaluation of ~exp(x/8) */
	double p = c0 + x * (c1 + x * (c2 + x * (c3 + x * c4)));
	p *= p;
	p *= p;
	p *= p;	/* p ~ exp(x) after three squarings */
	return 1 / p;
#endif
}
void sgemm_(char *transa, char *transb, int *m, int *n, int *k, float *alpha, float *a, int *lda, float *b, int *ldb, float *beta, float *c, int *ldc);
void sger_(int *m, int *n, float *alpha, float *x, int *incx, float *y, int *incy, float *a, int *lda);
void sger(int m, int n, float alpha, float *x, int incx, float *y, int incy, float *a, int lda);
void sgemv(char trans, int m, int n, float alpha, float *a, int lda, float *x, int incx, float beta, float *y, int incy);
void sgemv_(char *trans, int *m, int *n, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy);
/* Column-major sgemm front-end taking long arguments: C = alpha*op(A)*op(B)
 * + beta*C.  Degenerate leading dimensions arising from 1-sized operands
 * are patched to their dense values before dispatching to BLAS (or the
 * bundled sgemm when USEBLAS is off).  Aborts when any dimension exceeds
 * INT_MAX, since the BLAS interface takes int. */
void THBlas_gemm(char transa, char transb, long m, long n, long k, float alpha, float *a, long lda, float *b, long ldb, float beta, float *c, long ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
/* single-column result: the caller's ldc may be meaningless */
if(n == 1)
ldc = m;
if(transa_)
{
if(m == 1)
lda = k;
}
else
{
if(k == 1)
lda = m;
}
if(transb_)
{
if(k == 1)
ldb = n;
}
else
{
if(n == 1)
ldb = k;
}
if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
{
#ifdef USEBLAS
/* BLAS takes int: narrow after the range check above */
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_lda = (int)lda;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
#ifdef ACCELERATE
cblas_sgemm(CblasColMajor, transa == 't' ? CblasTrans : CblasNoTrans, transb == 't' ? CblasTrans : CblasNoTrans,
i_m, i_n, i_k, alpha, a, i_lda, b, i_ldb, beta, c, i_ldc);
#else
sgemm_(&transa, &transb, &i_m, &i_n, &i_k, &alpha, a, &i_lda, b, &i_ldb, &beta, c, &i_ldc);
#endif
#else
sgemm(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
#endif
return;
}
THError("Wrong parameters to gemm");
}
/* Column-major sgemv front-end taking long arguments:
 * y = alpha*op(A)*x + beta*y.  A single-column matrix gets its leading
 * dimension patched to m.  Out-of-range parameters are silently ignored
 * (no THError here, unlike THBlas_gemm). */
void THBlas_gemv(char trans, long m, long n, float alpha, float *a, long lda, float *x, long incx, float beta, float *y, long incy)
{
if(n == 1)
lda = m;
if( (m <= INT_MAX) && (n <= INT_MAX) &&
(lda > 0) && (lda <= INT_MAX) &&
(incx > 0) && (incx <= INT_MAX) &&
(incy > 0) && (incy <= INT_MAX) )
{
#ifdef USEBLAS
/* BLAS takes int: narrow after the range check above */
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
#ifdef ACCELERATE
cblas_sgemv(CblasColMajor, trans == 't' ? CblasTrans : CblasNoTrans, i_m, i_n, alpha, a, i_lda, x, i_incx, beta, y, i_incy);
#else
sgemv_(&trans, &i_m, &i_n, &alpha, a, &i_lda, x, &i_incx, &beta, y, &i_incy);
#endif
#else
sgemv(trans, m, n, alpha, a, lda, x, incx, beta, y, incy);
#endif
}
}
/* Column-major sger front-end taking long arguments:
 * A = alpha*x*y^T + A.  A single-column matrix gets its leading dimension
 * patched to m.  NOTE(review): unlike gemm/gemv there is no INT_MAX range
 * check before narrowing to int -- confirm callers never exceed it. */
void THBlas_ger(long m, long n, float alpha, float *x, long incx, float *y, long incy, float *a, long lda)
{
if(n == 1)
lda = m;
#ifdef USEBLAS
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
#ifdef ACCELERATE
cblas_sger(CblasColMajor, i_m, i_n, alpha, x, i_incx, y, i_incy, a, i_lda);
#else
sger_(&i_m, &i_n, &alpha, x, &i_incx, y, &i_incy, a, &i_lda);
#endif
#else
sger(m, n, alpha, x, incx, y, incy, a, lda);
#endif
}
/* r_ = beta*t + alpha*(m1 @ m2), restricted to t == r_.  The strides of
 * r_, m1 and m2 decide whether each operand is passed to gemm directly or
 * with a transpose flag; layouts that would need an actual copy abort with
 * "not implemented".  When r_ is row-major the product is computed as
 * (m2^T @ m1^T)^T by swapping m1/m2 and setting transpose_r = 't'. */
void THFloatTensor_addmm(THFloatTensor *r_, float beta, THFloatTensor *t, float alpha, THFloatTensor *m1, THFloatTensor *m2)
{
char transpose_r, transpose_m1, transpose_m2;
THFloatTensor *r__, *m1_, *m2_;
/* shape validation: m1 [p,q] @ m2 [q,r] must match t [p,r] */
if( (m1->nDimension != 2) || (m2->nDimension != 2))
THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension);
if(m1->size[1] != m2->size[0])
THError("size mismatch, m1: %ld, m2: %ld", m1->size[1], m2->size[0]);
if( t->nDimension != 2 )
THError("matrix expected, got %dD tensor for t", t->nDimension);
if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) )
THError("size mismatch, t: %ld, m1: %ld, t: %ld, m2: %ld", t->size[0], m1->size[1], t->size[1], m2->size[1]);
if(t != r_)
THError("Not implemented: t != r");
/* printf("%ldx%ld = %ldx%ld X %ldx%ld\n", r_->size[0], r_->size[1], m1->size[0], m1->size[1], m2->size[0], m2->size[1]); */
/* r_: column-major -> use as-is; row-major -> compute the transposed
 * product by swapping the operands */
if(r_->stride[0] == 1 && r_->stride[1] != 0)
{
transpose_r = 'n';
r__ = r_;
}
else if(r_->stride[1] == 1 && r_->stride[0] != 0)
{
THFloatTensor *swap = m2;
m2 = m1;
m1 = swap;
transpose_r = 't';
r__ = r_;
}
else
{
THError("Transpose not implemented (1)");
return;
/* transpose_r = 'n';
r__ = THFloatTensor_newWithSize2d(r_->size[1], r_->size[0]);
THFloatTensor_copy(r__, r_);
THFloatTensor_transpose(r__, NULL, 0, 1);*/
}
/* m1: pick 'n' or 't' depending on which stride is the unit one
 * (relative to the orientation chosen for r_) */
if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m1 = 'n';
m1_ = m1;
}
else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m1 = 't';
m1_ = m1;
}
else
{
THError("Transpose not implemented (2)");
return;
/*transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
m1_ = THFloatTensor_newContiguous(m1);*/
}
/* m2: same unit-stride dispatch as m1 */
if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m2 = 'n';
m2_ = m2;
}
else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m2 = 't';
m2_ = m2;
}
else
{
THError("Transpose not implemented (3)");
return;
/*transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
m2_ = THFloatTensor_(newContiguous)(m2);*/
}
/* do the operation: leading dimensions are the non-unit strides */
THBlas_gemm(transpose_m1,
transpose_m2,
r__->size[(transpose_r == 'n' ? 0 : 1)],
r__->size[(transpose_r == 'n' ? 1 : 0)],
m1_->size[(transpose_r == 'n' ? 1 : 0)],
alpha,
THFloatTensor_data(m1_),
(transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
THFloatTensor_data(m2_),
(transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
beta,
THFloatTensor_data(r__),
r__->stride[(transpose_r == 'n' ? 1 : 0)]);
/* free intermediate variables (no copies are made in the supported paths,
 * so these are currently no-ops) */
if(m1_ != m1)
THFloatTensor_free(m1_);
if(m2_ != m2)
THFloatTensor_free(m2_);
if(r__ != r_)
THError("freeCopyTo not implemented");
/*THFloatTensor_(freeCopyTo)(r__, r_);*/
}
/* r_ = beta * t + alpha * mat * vec, restricted to the in-place case r_ == t
 * (enforced below). Dispatches to BLAS gemv, choosing 'n' or 't' so that the
 * unit-stride dimension of mat becomes the gemv leading dimension; gemv is
 * column-major, so a row-major mat (stride[1] == 1) is fed as its transpose. */
void THFloatTensor_addmv(THFloatTensor *r_, float beta, THFloatTensor *t, float alpha, THFloatTensor *mat, THFloatTensor *vec)
{
/* shape checks: mat is M x N, vec is length N, t (== r_) is length M */
if( (mat->nDimension != 2) || (vec->nDimension != 1) )
THError("matrix and vector expected, got %dD, %dD", mat->nDimension, vec->nDimension);
if( mat->size[1] != vec->size[0] )
THError("size mismatch, %ld, %ld", mat->size[1], vec->size[0]);
if(t->nDimension != 1)
THError("vector expected, got t: %dD", t->nDimension);
if(t->size[0] != mat->size[0])
THError("size mismatch, t: %ld, mat: %ld", t->size[0], mat->size[0]);
if(r_ != t)
THError("r_ != t not implemented");
if(mat->stride[0] == 1)
{
/* column-major layout: lda is the column stride */
THBlas_gemv('n', mat->size[0], mat->size[1], alpha, THFloatTensor_data(mat), mat->stride[1],
THFloatTensor_data(vec), vec->stride[0], beta, THFloatTensor_data(r_), r_->stride[0]);
}
else if(mat->stride[1] == 1)
{
/* row-major layout: swap dimensions and ask gemv for the transpose */
THBlas_gemv('t', mat->size[1], mat->size[0], alpha, THFloatTensor_data(mat), mat->stride[0],
THFloatTensor_data(vec), vec->stride[0], beta, THFloatTensor_data(r_), r_->stride[0]);
}
else THError("addmv for non-contiguous not implemented");
}
#define TH_OMP_OVERHEAD_THRESHOLD 100000
/* Element-wise scale: r_[i] = t[i] * value over the flattened tensor.
 * r_ and t must already have the same number of elements. The loop is
 * parallelized with OpenMP only once the element count is large enough
 * to amortize the thread-launch overhead. */
void THFloatTensor_mul(THFloatTensor *r_, THFloatTensor *t, float value)
{
	float *src = THFloatTensor_data(t);
	float *dst = THFloatTensor_data(r_);
	long n = THFloatTensor_nElement(t);
	long idx;
#pragma omp parallel for if(n > TH_OMP_OVERHEAD_THRESHOLD) private(idx)
	for (idx = 0; idx < n; idx++)
		dst[idx] = src[idx] * value;
}
/* Rank-1 update: r_ = beta * t + alpha * vec1 (outer product) vec2, restricted
 * to the in-place case r_ == t. beta is applied by scaling r_ first; the outer
 * product then accumulates via BLAS ger. ger is column-major, so when r_ is
 * row-major (stride[1] == 1) the two vectors are swapped to compute the
 * transposed update into the same memory. */
void THFloatTensor_addr(THFloatTensor *r_, float beta, THFloatTensor *t, float alpha, THFloatTensor *vec1, THFloatTensor *vec2)
{
if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
THError("vector and vector expected, got %dD, %dD tensors", vec1->nDimension, vec2->nDimension);
if(t->nDimension != 2)
THError("expected matrix, got %dD tensor for t", t->nDimension);
if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) )
THError("size mismatch, t: %ld, vec1: %ld, t: %ld, vec2: %ld", t->size[0], vec1->size[0], t->size[1], vec2->size[0]);
if(r_ != t)
THError("r_ != t not implemented");
/* pre-scale r_ by beta (ger only accumulates alpha * x * y^T) */
if(beta != 1)
THFloatTensor_mul(r_, r_, beta);
if(r_->stride[0] == 1)
{
/* column-major r_: lda is the column stride */
THBlas_ger(vec1->size[0], vec2->size[0],
alpha, THFloatTensor_data(vec1), vec1->stride[0],
THFloatTensor_data(vec2), vec2->stride[0],
THFloatTensor_data(r_), r_->stride[1]);
}
else if(r_->stride[1] == 1)
{
/* row-major r_: compute the transposed outer product vec2 * vec1^T */
THBlas_ger(vec2->size[0], vec1->size[0],
alpha, THFloatTensor_data(vec2), vec2->stride[0],
THFloatTensor_data(vec1), vec1->stride[0],
THFloatTensor_data(r_), r_->stride[0]);
}
else THError("addr for non-contiguous not implemented");
}
/* Debug helper: print a 2-D tensor to stdout, one row per line in the form
 * "rowindex) v v v ...". Ranks other than 2 just print a notice. */
void printtensor(THFloatTensor *t)
{
	if(t->nDimension == 2)
	{
		int i, j;
		for(i = 0; i < t->size[0]; i++)
		{
			printf("%d) ", i);
			for(j = 0; j < t->size[1]; j++)
				/* fix: honor the column stride; the old code indexed with a
				 * bare `+ j`, which is only correct when stride[1] == 1 */
				printf("%f ", t->storage->data[i * t->stride[0] + j * t->stride[1]]);
			printf("\n");
		}
	} else printf("printtensor: nDimension not implemented\n");
}
/* Valid (no padding) 2-D cross-correlation kernel on raw float pointers:
 * accumulates alpha * (t_ (x) k_) into r_. The image t_ is ir x ic, the
 * kernel k_ is kr x kc, with row/column strides sr/sc; the output written
 * through r_ is ((ir-kr)/sr+1) x ((ic-kc)/sc+1). Two code paths: a direct
 * dot-product loop, and a vectorized path (unit column stride, wide rows)
 * that sweeps whole output rows via THFloatVector_add. */
void THFloatTensor_validXCorr2Dptr(float *r_,
float alpha,
float *t_, long ir, long ic,
float *k_, long kr, long kc,
long sr, long sc)
{
/* NOTE(review): `or`/`oc` are output rows/cols; `or` is a valid C identifier
 * but collides with the iso646.h / C++ alternative operator token. */
long or = (ir - kr) / sr + 1;
long oc = (ic - kc) / sc + 1;
long xx, yy, kx, ky;
if ((sc != 1) || (oc < 4)) {
/* regular convolution */
for(yy = 0; yy < or; yy++) {
for(xx = 0; xx < oc; xx++) {
/* Dot product in two dimensions... (between input image and the mask) */
float *pi_ = t_ + yy*sr*ic + xx*sc;
float *pw_ = k_;
float sum = 0;
for(ky = 0; ky < kr; ky++) {
for(kx = 0; kx < kc; kx++) {
sum += pi_[kx]*pw_[kx];
}
pi_ += ic; /* next input line */
pw_ += kc; /* next mask line */
}
/* Update output */
*r_++ += alpha*sum;
}
}
} else {
/* SSE-based convolution */
/* NOTE(review): THFloatVector_add presumably computes r_[0..oc) +=
 * scale * pis_[0..oc) (an axpy over one output row) — confirm against
 * its definition elsewhere in the project. */
for(yy = 0; yy < or; yy++) {
float *pi_ = t_ + yy*sr*ic;
float *pw_ = k_;
for (ky = 0; ky < kr; ky++) {
float *pis_ = pi_;
for (kx = 0; kx < kc; kx++) {
THFloatVector_add(r_, pis_, alpha*pw_[kx], oc);
pis_++;
}
pi_ += ic; /* next input line */
pw_ += kc; /* next mask line */
}
r_ += oc;
}
}
}
/* r_ = beta * r_ + alpha * conv2D(t_, k_): single-image multi-plane 2-D
 * convolution. t_ is a 3-D input (planes x rows x cols), k_ a 4-D kernel
 * bank (outPlanes x inPlanes x kRows x kCols); srow/scol are the strides.
 * Only the 'V' (valid) + 'X' (cross-correlation) combination is supported.
 * r_ is resized to (outPlanes x outRows x outCols); beta pre-scales or
 * zeroes it, then each output plane accumulates the correlation of every
 * input plane with its kernel slice. Output planes run in parallel. */
void THFloatTensor_conv2Dmv(THFloatTensor *r_, float beta, float alpha, THFloatTensor *t_, THFloatTensor *k_, long srow, long scol, const char *vf, const char *xc)
{
long nInputPlane, nInputRows, nInputCols;
long nKernelRows, nKernelCols;
long nOutputPlane, nOutputRows, nOutputCols;
long istride0, kstride0, kstride1;
THFloatTensor *input;
THFloatTensor *kernel;
float *input_data;
float *weight_data;
float *output_data;
long nelem;
long k;
if(t_->nDimension != 3)
THError("input: 3D Tensor expected");
if(k_->nDimension != 4)
THError("kernel: 4D Tensor expected");
if(srow < 1)
THError("Stride should be a positive integer");
if(scol < 1)
THError("Stride should be a positive integer");
if(*vf != 'V' || *xc != 'X')
THError("Type of convolution can be 'V','X' only");
input = t_;
kernel = k_;
nInputPlane = input->size[0];
istride0 = input->stride[0];
nInputRows = input->size[1];
nInputCols = input->size[2];
kstride0 = kernel->stride[0];
kstride1 = kernel->stride[1];
nKernelRows = kernel->size[2];
nKernelCols = kernel->size[3];
nOutputPlane = kernel->size[0];
if(kernel->size[1] != nInputPlane)
THError("invalid number of input planes");
if(!(nInputRows >= nKernelRows && nInputCols >= nKernelCols))
THError("conv2Dmv : Input image is smaller than kernel");
/* valid-mode output geometry */
nOutputRows = (nInputRows - nKernelRows) / srow + 1;
nOutputCols = (nInputCols - nKernelCols) / scol + 1;
/* element count before the resize decides whether beta can be applied */
nelem = THFloatTensor_nElement(r_);
THFloatTensor_resize3d(r_, nOutputPlane, nOutputRows, nOutputCols);
input_data = THFloatTensor_data(input);
weight_data = THFloatTensor_data(kernel);
output_data = THFloatTensor_data(r_);
/* zero the output when it was empty, beta == 0, or the resize changed it */
if (nelem == 0 || beta == 0 || nelem != THFloatTensor_nElement(r_))
{
/*THFloatTensor_zero)(r_);*/
#pragma omp parallel for private(k)
for (k = 0; k < r_->size[0]; k++)
{
float* ptr_output = output_data + k*nOutputCols*nOutputRows;
long l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] = 0.0;
}
}
else if (beta != 1)
{
/*THFloatTensor_mul)(r_, beta);*/
#pragma omp parallel for private(k)
for (k = 0; k < r_->size[0]; k++)
{
float* ptr_output = output_data + k*nOutputCols*nOutputRows;
long l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] *= beta;
}
}
/* accumulate: out[k] += alpha * sum_i xcorr(in[i], w[k][i]) */
#pragma omp parallel for private(k)
for(k = 0; k < nOutputPlane; k++)
{
long i;
/* get output */
float *ptr_output = output_data + k*nOutputCols*nOutputRows;
for(i = 0; i < nInputPlane; i++)
{
/* get kernel */
float *ptr_weight = weight_data + k*kstride0 + i*kstride1;
/* get input */
float *ptr_input = input_data + i*istride0;
/* do image, kernel convolution */
THFloatTensor_validXCorr2Dptr(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
}
}
}
/* Batched variant of conv2Dmv: r_ = beta * r_ + alpha * conv2D(t_, k_) for a
 * 4-D input t_ (batch x inPlanes x rows x cols) and a 4-D kernel bank k_
 * (outPlanes x inPlanes x kRows x kCols). Only 'V' (valid) + 'X'
 * (cross-correlation) is supported. r_ is resized to
 * (batch x outPlanes x outRows x outCols), pre-scaled/zeroed according to
 * beta, then each (batch, output-plane) pair accumulates the correlation of
 * every input plane. Batches run in parallel. */
void THFloatTensor_conv2Dmm(THFloatTensor *r_, float beta, float alpha, THFloatTensor *t_, THFloatTensor *k_, long srow, long scol, const char *vf, const char *xc)
{
long nInputPlane, nInputRows, nInputCols;
long nKernelRows, nKernelCols;
long nOutputPlane, nOutputRows, nOutputCols;
long kstride0, kstride1;
THFloatTensor *input;
THFloatTensor* kernel;
long nbatch;
long nelem;
float *input_data;
float *weight_data;
float *output_data;
long p;
if(t_->nDimension != 4)
THError("input: 4D Tensor expected"); /* fix: message said 3D for a 4D check */
if(k_->nDimension != 4)
THError("kernel: 4D Tensor expected");
if(srow < 1)
THError("Stride should be a positive integer");
if(scol < 1)
THError("Stride should be a positive integer");
if(*vf != 'V' || *xc != 'X')
THError("Type of convolution can be 'V','X' only");
input = t_;
kernel = k_;
nbatch = input->size[0];
nInputPlane = input->size[1];
nInputRows = input->size[2];
nInputCols = input->size[3];
kstride0 = kernel->stride[0];
kstride1 = kernel->stride[1];
nKernelRows = kernel->size[2];
nKernelCols = kernel->size[3];
nOutputPlane = kernel->size[0];
if(kernel->size[1] != nInputPlane)
THError("invalid number of input planes");
if(!(nInputRows >= nKernelRows && nInputCols >= nKernelCols))
THError("conv2Dmm : Input image is smaller than kernel"); /* fix: said conv2Dmv */
/* valid-mode output geometry */
nOutputRows = (nInputRows - nKernelRows) / srow + 1;
nOutputCols = (nInputCols - nKernelCols) / scol + 1;
/* element count before the resize decides whether beta can be applied */
nelem = THFloatTensor_nElement(r_);
THFloatTensor_resize4d(r_, nbatch, nOutputPlane, nOutputRows, nOutputCols);
input_data = THFloatTensor_data(input);
weight_data = THFloatTensor_data(kernel);
output_data = THFloatTensor_data(r_);
/* zero the output when it was empty, beta == 0, or the resize changed it */
if (nelem == 0 || beta == 0 || nelem != THFloatTensor_nElement(r_))
{
/*THFloatTensor_(zero)(r_);*/
#pragma omp parallel for private(p)
for (p=0; p < r_->size[0]; p++)
{
long k;
for (k = 0; k < r_->size[1]; k++)
{
float* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows;
long l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] = 0.0;
}
}
}
else if (beta != 1)
{
/*THFloatTensor_(mul)(r_, beta);*/
#pragma omp parallel for private(p)
for(p=0; p < r_->size[0]; p++)
{
long k;
for (k = 0; k < r_->size[1]; k++)
{
float* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows;
long l;
for (l = 0; l < nOutputRows*nOutputCols; l++)
ptr_output[l] *= beta;
}
}
}
/* accumulate: out[p][k] += alpha * sum_i xcorr(in[p][i], w[k][i]) */
#pragma omp parallel for private(p)
for(p=0; p < nbatch; p++)
{
long k;
for(k = 0; k < nOutputPlane; k++)
{
long i;
/* get output */
float *ptr_output = output_data + p*nOutputPlane*nOutputCols*nOutputRows + k*nOutputCols*nOutputRows;
for(i = 0; i < nInputPlane; i++)
{
/* get kernel */
float *ptr_weight = weight_data + k*kstride0 + i*kstride1;
/* get input */
float *ptr_input = input_data + p*nInputPlane*nInputRows*nInputCols + i*nInputRows*nInputCols;
/* do image, kernel convolution */
THFloatTensor_validXCorr2Dptr(ptr_output,
alpha,
ptr_input, nInputRows, nInputCols,
ptr_weight, nKernelRows, nKernelCols,
srow, scol);
}
}
}
}
#ifndef USEBLAS
/* Convolution-as-GEMM front end: packs every geometry parameter of the
 * r = beta*r + alpha*(filt x unfolded(m)) operation into a sgemmargs struct
 * and hands it to the custom sgemmargs() kernel, which performs the implicit
 * im2col. kH/kW are the kernel size, dH/dW the strides, padH/padW the
 * padding. No validation is done here; callers must pass consistent shapes. */
void THFloatTensor_convmm(THFloatTensor *r, float beta, float alpha, THFloatTensor *filt, THFloatTensor *m,
int kH, int kW, int dH, int dW, int padH, int padW)
{
struct sgemmargs args;
args.transa = 0;
args.transb = 0;
/* GEMM view: m = output pixels, n = output planes, k = filter length */
args.m = r->size[1] * r->size[2];
args.n = r->size[0];
args.k = filt->size[1];
args.alpha = alpha;
args.beta = beta;
args.lda = m->stride[0];
args.ldb = filt->stride[0];
args.ldc = r->stride[0];
args.a = THFloatTensor_data(m);
args.b = THFloatTensor_data(filt);
args.c = THFloatTensor_data(r);
/* kernel, input and output strides for the implicit unfolding */
args.ks0 = kH * kW;
args.ks1 = kW;
args.is0 = m->stride[0];
args.is1 = m->stride[1];
args.ih = m->size[1];
args.os0 = r->stride[0];
args.os1 = r->stride[1];
args.dW = dW;
args.dH = dH;
args.padW = padW;
args.padH = padH;
sgemmargs(&args);
}
#endif
#ifdef HAVEFP16
/* Narrow a float buffer to half precision (__fp16), element by element. */
void tofp16(__fp16 *dst, const float *src, size_t len)
{
	const float *end = src + len;
	while (src < end)
		*dst++ = *src++;
}
/* Widen a half-precision (__fp16) buffer back to float, element by element. */
void fromfp16(float *dst, const __fp16 *src, size_t len)
{
	const __fp16 *end = src + len;
	while (src < end)
		*dst++ = *src++;
}
#endif
#ifdef USEQSML
/* Walks every module of the loaded network and, for each spatial-convolution
 * module, reorders its weight tensor in place into the layout QSML expects
 * (see transform_mem). Must run once after loading and before inference. */
void init_thnets4qsml_conv(THNETWORK *network)
{
int m, kW, kH, inP, outP;
struct module newmod;
for(m = 0; m < network->net->nelem; m++){
newmod = network->net->modules[m];
if(newmod.type==MT_SpatialConvolutionMM ||
newmod.type==MT_SpatialConvolutionVirtMM ||
newmod.type==MT_SpatialConvolution){
kW = newmod.SpatialConvolution.kW;
kH = newmod.SpatialConvolution.kH;
inP = newmod.SpatialConvolution.nInputPlane;
outP = newmod.SpatialConvolution.nOutputPlane;
transform_mem(newmod,kW,kH,inP,outP);
}
}
}
//weight thnets[col,row,plane,outplane] -> weight qsml[outplane,plane,col,row]
/* Reorders a convolution weight tensor in place (through a temporary copy)
 * from thnets layout [col,row,plane,outplane] to the QSML layout
 * [outplane,plane,col,row]. col/row are kernel width/height, plane the input
 * planes, outp the output planes. */
void transform_mem(struct module newmod, int col, int row, int plane, int outp)
{
	int i, j, k, m, isx, idx;
	int wsize = col*row*plane*outp;
	float* weightout = THFloatTensor_data(newmod.SpatialConvolution.weight);
	float* weightin = (float*)malloc(wsize*sizeof(float));
	if(!weightin)
		THError("transform_mem: out of memory"); /* fix: malloc was unchecked */
	memcpy(weightin, weightout, wsize*sizeof(float));
	//LOGD("%d,%d,%d,%d, %d\n",col,row,plane,outp,wsize);
	for(m = 0; m < outp; m++) {
		for(k = 0; k < plane; k++) {
			for(j = 0;j < row; j++) {
				for(i = 0; i < col; i++) {
					isx = i + j*col + k*col*row + m*col*row*plane;
					idx = m + k*outp + i*outp*plane + j*outp*col*plane;
					weightout[idx] = weightin[isx];
				}
			}
		}
	}
	free(weightin); /* fix: the temporary copy was leaked */
}
//input thnets[col,row,plane] -> input qsml[plane,col,row]
/* Repacks an input volume from thnets layout [col,row,plane] into the QSML
 * layout [plane,col,row]. Returns a freshly malloc'd buffer of
 * col*row*plane floats; ownership passes to the caller, who must free it. */
float* transform_mem_input(float* in1, int col, int row, int plane)
{
	int x, y, z;
	int total = col*row*plane;
	float* out = (float*)malloc(total*sizeof(float));
	const float* src = in1;
	/* walk the source sequentially; scatter into the destination layout */
	for(z = 0; z < plane; z++)
		for(y = 0; y < row; y++)
			for(x = 0; x < col; x++)
				out[z + x*plane + y*col*plane] = *src++;
	return out;
}
#endif
|
GB_binop__bshift_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__bshift_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__bshift_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_int64)
// C=scalar+B GB (_bind1st__bshift_int64)
// C=scalar+B' GB (_bind1st_tran__bshift_int64)
// C=A+scalar GB (_bind2nd__bshift_int64)
// C=A'+scalar GB (_bind2nd_tran__bshift_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_int64 (aij, bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_int64 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_INT64 || GxB_NO_BSHIFT_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, no accumulator; the loop lives
// in the included template, specialized by the GB_* macros above to compute
// cij = GB_bitshift_int64 (aij, bij). Returns GrB_NO_VALUE if compiled out.
GrB_Info GB (_Cdense_ewise3_noaccum__bshift_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; B has been pre-sliced into
// B_ntasks tasks (B_ek_slicing) for the included template.
GrB_Info GB (_Cdense_accumB__bshift_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar passed as untyped bytes
// (p_bwork), unpacked here to the operator's int8_t y-type.
GrB_Info GB (_Cdense_accumb__bshift_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above always returns (generated-code artifact)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked with M or !M): cij = GB_bitshift_int64
// (aij, bij) on the union of patterns. Work is pre-partitioned into the
// TaskList; the ek_slicing workspaces are declared here and released by
// GB_FREE_WORK after the included template runs.
GrB_Info GB (_AaddB__bshift_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked) where C is sparse/hypersparse:
// cij = GB_bitshift_int64 (aij, bij) on the intersection of patterns.
GrB_Info GB (_AemultB_08__bshift_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#>=A.*B where A is sparse/hyper and B is bitmap/full. Since
// bshift is not commutative (GB_BINOP_FLIP is 1 above), flipxy selects
// between the f(x,y) and f(y,x) instantiations of the template.
GrB_Info GB (_AemultB_02__bshift_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M>=A.*B where M is sparse/hyper and both A and B are
// bitmap/full; M is pre-sliced into M_ntasks tasks for the template.
GrB_Info GB (_AemultB_04__bshift_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked) where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__bshift_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply with the scalar bound to the first argument:
// Cx [p] = GB_bitshift_int64 (x, Bx [p]) for every entry present in B
// (Bb is B->b when B is bitmap; GBB is true for non-bitmap B).
GrB_Info GB (_bind1st__bshift_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_bitshift_int64 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply with the scalar bound to the second argument:
// Cx [p] = GB_bitshift_int64 (Ax [p], y) for every entry present in A.
GrB_Info GB (_bind2nd__bshift_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_bitshift_int64 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int64 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply with the scalar bound first, via the
// GB_CAST_OP macro defined just above. GB_ATYPE is temporarily redefined to
// the operator's y-type (int8_t) because A supplies the second operand, and
// restored after the template include.
GrB_Info GB (_bind1st_tran__bshift_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int64 (aij, y) ; \
}
// C = op (A', y): transpose A and apply with the scalar bound second, via
// the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__bshift_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
SingleBeginLink.c | int main() {
#pragma omp single
{
}
}
|
GB_unop__identity_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_int8)
// op(A') function: GB (_unop_tran__identity_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
1
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = identity (Ax): element-wise copy of an int8 array. Since the op is
// the identity with no typecast, the full (non-bitmap) case collapses to a
// single parallel memcpy; the bitmap case copies only present entries.
GrB_Info GB (_unop_apply__identity_int8_int8)
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (A'): transpose with the identity op applied per entry; the
// actual transpose loop is in the included template, specialized by the
// GB_* macros at the top of this file.
GrB_Info GB (_unop_tran__identity_int8_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cpu_ctc.h | #pragma once
#include <tuple>
#include <cmath>
#include <limits>
#include <algorithm>
#include <numeric>
#if !defined(CTC_DISABLE_OMP) && !defined(APPLE)
#include <omp.h>
#endif
#include "ctc_helper.h"
// CPU implementation of the Connectionist Temporal Classification loss.
// One instance handles a whole minibatch; all scratch memory comes from the
// caller-supplied `workspace` (no allocation inside the class).
template<typename ProbT>
class CpuCTC {
public:
// Noncopyable
// alphabet_size: number of characters including the blank; minibatch:
// examples per batch; num_threads <= 0 means "use omp_get_max_threads()".
CpuCTC(int alphabet_size, int minibatch, void* workspace, int num_threads,
int blank_label) :
alphabet_size_(alphabet_size), minibatch_(minibatch),
num_threads_(num_threads), blank_label_(blank_label),
workspace_(workspace) {
#if defined(CTC_DISABLE_OMP) || defined(APPLE)
#else
if (num_threads > 0) {
omp_set_num_threads(num_threads);
} else {
num_threads_ = omp_get_max_threads();
}
#endif
};
CpuCTC(const CpuCTC&) = delete;
CpuCTC& operator=(const CpuCTC&) = delete;
// Full training pass: writes per-example costs and gradients w.r.t. the
// activations.
ctcStatus_t cost_and_grad(const ProbT* const activations,
ProbT *grads,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
// Inference pass: per-example costs only (forward scores, no gradients).
ctcStatus_t score_forward(const ProbT* const activations,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
private:
// Per-example view into the shared workspace (lattices, blank-augmented
// label sequence, skip tables); built fresh for each example.
class CpuCTC_metadata {
private:
int setup_labels(const int* const labels, int blank_label, int L, int S);
public:
CpuCTC_metadata(int L, int S, int T, int mb, int alphabet_size,
void* workspace, size_t bytes_used, int blank_label,
const int* const labels);
ProbT* alphas;
ProbT* betas;
int* labels_w_blanks;
int* e_inc;
int* s_inc;
ProbT* output;
int repeats;
};
int alphabet_size_; // Number of characters plus blank
int minibatch_;
int num_threads_;
int blank_label_;
void* workspace_;
// Column-wise softmax of the activations into probs.
void softmax(const ProbT* const activations, ProbT* probs,
const int* const input_lengths);
// Forward-backward for one example; returns (cost, over_threshold).
std::tuple<ProbT, bool>
cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
const int* const labels, int T, int L,
int mb, size_t bytes_used);
ProbT compute_alphas(const ProbT* probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas);
ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output);
};
// Carves the caller-provided workspace (starting at offset bytes_used) into
// the per-example CTC buffers. No allocation happens here: every pointer
// aliases `workspace`. L = label length, S = 2L+1 (labels with blanks),
// T = timesteps. (mb is accepted but not used in this constructor.)
template<typename ProbT>
CpuCTC<ProbT>::CpuCTC_metadata::CpuCTC_metadata(int L, int S, int T, int mb,
int alphabet_size,
void* workspace, size_t bytes_used,
int blank_label,
const int* const labels) {
// alphas: S x T forward lattice, initialized to log(0)
alphas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S * T;
std::fill(alphas, alphas + S * T, ctc_helper::neg_inf<ProbT>());
// betas: a single column of length S, reused during the backward sweep
betas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S;
std::fill(betas, betas + S, ctc_helper::neg_inf<ProbT>());
// blank-augmented label sequence and the start/end skip-increment tables
labels_w_blanks = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
e_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
s_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
// output: one slot per alphabet symbol, used when forming gradients
output = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * alphabet_size;
// interleave blanks into the labels and count adjacent repeats
repeats = setup_labels(labels, blank_label, L, S);
}
// Build the blank-interleaved label sequence and the start/end window
// increment tables used by the alpha/beta recursions.
// Returns the number of adjacent repeated labels in `labels`.
template<typename ProbT>
int CpuCTC<ProbT>::CpuCTC_metadata::setup_labels(const int* const labels,
int blank_label, int L, int S) {
int e_counter = 0;
int s_counter = 0;
s_inc[s_counter++] = 1;
int repeats = 0;
for (int i = 1; i < L; ++i) {
if (labels[i-1] == labels[i]) {
// Repeated label: the DP window can only advance one position at a
// time (a blank must separate the repeat), so record two single steps.
s_inc[s_counter++] = 1;
s_inc[s_counter++] = 1;
e_inc[e_counter++] = 1;
e_inc[e_counter++] = 1;
++repeats;
}
else {
// Distinct neighbors: the window may skip over the blank (step of 2).
s_inc[s_counter++] = 2;
e_inc[e_counter++] = 2;
}
}
e_inc[e_counter++] = 1;
// Interleave blanks: blank, l0, blank, l1, ..., blank  (length S = 2L+1).
for (int i = 0; i < L; ++i) {
labels_w_blanks[2 * i] = blank_label;
labels_w_blanks[2 * i + 1] = labels[i];
}
labels_w_blanks[S - 1] = blank_label;
return repeats;
}
// Convert raw activations to per-column probabilities, one column per
// (minibatch item, time step).  Columns are laid out time-major with the
// minibatch interleaved: column offset = (mb + minibatch_ * c) * alphabet_size_.
// Only the first input_lengths[mb] time steps of each item are processed.
template<typename ProbT>
void
CpuCTC<ProbT>::softmax(const ProbT* const activations, ProbT* probs,
const int* const input_lengths) {
#pragma omp parallel for
for (int mb = 0; mb < minibatch_; ++mb) {
for(int c = 0; c < input_lengths[mb]; ++c) {
int col_offset = (mb + minibatch_ * c) * alphabet_size_;
// Subtract the column max before exponentiating for numerical stability.
ProbT max_activation = -std::numeric_limits<ProbT>::infinity();
for(int r = 0; r < alphabet_size_; ++r)
max_activation = std::max(max_activation, activations[r + col_offset]);
ProbT denom = ProbT(0.);
for(int r = 0; r < alphabet_size_; ++r) {
probs[r + col_offset] = std::exp(activations[r + col_offset] - max_activation);
denom += probs[r + col_offset];
}
for(int r = 0; r < alphabet_size_; ++r) {
probs[r + col_offset] /= denom;
}
}
}
}
// Run the full CTC forward/backward pass for one minibatch item.
// Returns (negative forward log-likelihood, over_threshold), where
// over_threshold flags a forward/backward log-likelihood mismatch larger
// than ctc_helper::threshold (a numerical-consistency check).
template<typename ProbT>
std::tuple<ProbT, bool>
CpuCTC<ProbT>::cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
const int* const labels,
int T, int L, int mb, size_t bytes_used) {
const int S = 2*L + 1; // Number of labels with blanks
CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used, blank_label_, labels);
bool over_threshold = false;
// An alignment is impossible if the sequence is shorter than the labels
// plus the blanks forced between repeats.
if (L + ctcm.repeats > T) {
return std::make_tuple(ProbT(0), over_threshold); // TODO, not right to return 0
}
ProbT llForward = compute_alphas(probs, ctcm.repeats, S, T, ctcm.e_inc,
ctcm.s_inc, ctcm.labels_w_blanks,
ctcm.alphas);
// The backward pass also fills `grad` in place.
ProbT llBackward = compute_betas_and_grad(grad, probs, llForward, ctcm.repeats,
S, T, ctcm.e_inc, ctcm.s_inc,
ctcm.labels_w_blanks,
ctcm.alphas,
ctcm.betas,
ctcm.output);
// Forward and backward likelihoods should agree up to rounding error.
ProbT diff = std::abs(llForward - llBackward);
if (diff > ctc_helper::threshold) {
over_threshold = true;
}
return std::make_tuple(-llForward, over_threshold);
}
// Computes forward probabilities (alphas) in log space over the (S, T)
// lattice and returns the forward log-likelihood.  Only the reachable band
// of states, tracked by [start, end), is updated at each time step.
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_alphas(const ProbT* probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas) {
// A path can only start at the initial blank (i=0) or the first label (i=1).
int start = (((S /2) + repeats - T) < 0) ? 0 : 1,
end = S > 1 ? 2 : 1;
for (int i = start; i < end; ++i) {
alphas[i] = std::log(probs[labels[i]]);
}
for(int t = 1; t < T; ++t) {
// Shrink/grow the reachable window as time advances.
int remain = (S / 2) + repeats - (T - t);
if(remain >= 0)
start += s_inc[remain];
if(t <= (S / 2) + repeats)
end += e_inc[t - 1];
int startloop = start;
// Column offsets: idx1/idx2 index alphas at t and t-1; idx3 indexes the
// probability column for time t (columns strided by alphabet*minibatch).
int idx1 = t * S, idx2 = (t - 1) * S, idx3 = t * (alphabet_size_ * minibatch_);
if (start == 0) {
// State 0 (leading blank) can only be reached from itself.
alphas[idx1] = alphas[idx2] + std::log(probs[blank_label_ + idx3]);
startloop += 1;
}
for(int i = startloop; i < end; ++i) {
ProbT prev_sum = ctc_helper::log_plus<ProbT>()(alphas[i + idx2], alphas[(i-1) + idx2]);
// Skip two if not on blank and not on repeat.
if (labels[i] != blank_label_ && i != 1 && labels[i] != labels[i-2])
prev_sum = ctc_helper::log_plus<ProbT>()(prev_sum, alphas[(i-2) + idx2]);
alphas[i + idx1] = prev_sum + std::log(probs[labels[i] + idx3]);
}
}
// Total likelihood: log-sum over the final column's reachable states.
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, alphas[i + (T - 1) * S]);
}
return loglike;
}
// Starting from T, we sweep backward over the alpha array computing one column
// of betas as we go. At each position we can update product alpha * beta and then
// sum into the gradient associated with each label.
// NOTE computes gradient w.r.t UNNORMALIZED final layer activations.
// Assumed passed in grads are already zeroed!
// Returns the backward log-likelihood (should match the forward one).
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output) {
// Paths must end at the final blank (S-1) or the final label (S-2).
int start = S > 1 ? (S - 2) : 0,
end = (T > (S / 2) + repeats) ? S : S-1;
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
//set the starting values in the beta column at the very right edge
for (int i = start; i < end; ++i) {
betas[i] = std::log(probs[labels[i] + (T - 1) * (alphabet_size_ * minibatch_)]);
//compute alpha * beta in log space at this position in (S, T) space
alphas[i + (T - 1) * S] += betas[i];
//update the gradient associated with this label
//essentially performing a reduce-by-key in a sequential manner
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + (T - 1) * S], output[labels[i]]);
}
//update the gradient wrt to each unique label
for (int i = 0; i < alphabet_size_; ++i) {
int idx3 = (T - 1) * alphabet_size_ * minibatch_ + i;
// Labels that never occur at this step keep grad = prob (softmax grad
// with zero posterior mass).
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
probs[idx3] == 0.0) {
grad[idx3] = probs[idx3];
} else {
grad[idx3] = probs[idx3] - std::exp(output[i] -
std::log(probs[idx3]) - log_partition);
}
}
//loop from the second to last column all the way to the left
for(int t = T - 2; t >= 0; --t) {
// Mirror of the forward pass: shrink/grow the reachable band.
int remain = (S / 2) + repeats - (T - t);
if(remain >= -1)
start -= s_inc[remain + 1];
if(t < (S / 2) + repeats)
end -= e_inc[t];
int endloop = end == S ? end - 1 : end;
int idx1 = t * S, idx3 = t * (alphabet_size_ * minibatch_);
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
for(int i = start; i < endloop; ++i) {
ProbT next_sum = ctc_helper::log_plus<ProbT>()(betas[i], betas[(i+1)]);
// Skip two if not on blank and not on repeat.
if (labels[i] != blank_label_ && i != (S-2) && labels[i] != labels[i+2]){
next_sum = ctc_helper::log_plus<ProbT>()(next_sum, betas[(i+2)]);
}
// betas[] is updated in place: it holds column t+1 on entry, t on exit.
betas[i] = next_sum + std::log(probs[labels[i] + idx3]);
//compute alpha * beta in log space
alphas[i + idx1] += betas[i];
//update the gradient associated with this label
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + idx1], output[labels[i]]);
}
// The last state (final blank) is excluded from the loop above because it
// has no successor states other than itself.
if (end == S) {
betas[(S-1)] = betas[(S-1)] + std::log(probs[blank_label_ + idx3]);
alphas[(S-1) + idx1] += betas[(S-1)];
output[labels[S-1]] =
ctc_helper::log_plus<ProbT>()(alphas[S-1 + idx1], output[labels[S-1]]);
}
//go over the unique labels and compute the final grad
// wrt to each one at this time step
for (int i = 0; i < alphabet_size_; ++i) {
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
probs[idx3] == 0.0) {
grad[idx3] = probs[idx3];
} else {
grad[idx3] = probs[idx3] - std::exp(output[i] -
std::log(probs[idx3]) - log_partition);
}
++idx3;
}
}
// Backward likelihood: log-sum over the first column's reachable states.
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, betas[i]);
}
return loglike;
}
// Compute CTC cost and gradients for a whole minibatch.
// grads must be pre-zeroed by the caller; costs receives one value per item.
// Returns CTC_STATUS_INVALID_VALUE if any pointer argument is null.
template<typename ProbT>
ctcStatus_t
CpuCTC<ProbT>::cost_and_grad(const ProbT* const activations,
ProbT *grads,
ProbT *costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths) {
if (activations == nullptr ||
grads == nullptr ||
costs == nullptr ||
flat_labels == nullptr ||
label_lengths == nullptr ||
input_lengths == nullptr
)
return CTC_STATUS_INVALID_VALUE;
// The softmax probabilities live at the front of the shared workspace;
// per-item scratch buffers are carved out after them.
ProbT* probs = static_cast<ProbT *>(workspace_);
int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;
//per minibatch memory
size_t per_minibatch_bytes = 0;
int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);;
int maxS = 2 * maxL + 1;
// NOTE(review): these sizes use sizeof(float) although the buffers hold
// ProbT; if ProbT is double this undercounts the per-item workspace —
// confirm against the matching workspace-size computation elsewhere.
//output
per_minibatch_bytes += sizeof(float) * alphabet_size_;
//alphas
per_minibatch_bytes += sizeof(float) * maxS * maxT;
//betas
per_minibatch_bytes += sizeof(float) * maxS;
//labels w/blanks, e_inc, s_inc
per_minibatch_bytes += 3 * sizeof(int) * maxS;
softmax(activations, probs, input_lengths);
#pragma omp parallel for
for (int mb = 0; mb < minibatch_; ++mb) {
const int T = input_lengths[mb]; // Length of utterance (time)
const int L = label_lengths[mb]; // Number of labels in transcription
bool mb_status;
// flat_labels is the concatenation of all items' labels; the offset for
// item mb is the sum of the preceding label lengths.
std::tie(costs[mb], mb_status) =
cost_and_grad_kernel(grads + mb * alphabet_size_,
probs + mb * alphabet_size_,
flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0),
T, L, mb,
bytes_used + mb * per_minibatch_bytes);
}
return CTC_STATUS_SUCCESS;
}
// Compute only the CTC cost (forward pass, no gradients) for a minibatch.
// Returns CTC_STATUS_INVALID_VALUE if any pointer argument is null.
template<typename ProbT>
ctcStatus_t CpuCTC<ProbT>::score_forward(const ProbT* const activations,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths) {
if (activations == nullptr ||
costs == nullptr ||
flat_labels == nullptr ||
label_lengths == nullptr ||
input_lengths == nullptr
)
return CTC_STATUS_INVALID_VALUE;
// Workspace layout mirrors cost_and_grad: softmax output first, then
// per-item scratch regions.
ProbT* probs = static_cast<ProbT *>(workspace_);
int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;
//per minibatch memory
size_t per_minibatch_bytes = 0;
int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
int maxS = 2 * maxL + 1;
// NOTE(review): sizeof(float) vs ProbT — same concern as in cost_and_grad.
//output
per_minibatch_bytes += sizeof(float) * alphabet_size_;
//alphas
per_minibatch_bytes += sizeof(float) * maxS * maxT;
//betas
per_minibatch_bytes += sizeof(float) * maxS;
//labels w/blanks, e_inc, s_inc
per_minibatch_bytes += 3 * sizeof(int) * maxS;
softmax(activations, probs, input_lengths);
#pragma omp parallel for
for (int mb = 0; mb < minibatch_; ++mb) {
const int T = input_lengths[mb]; // Length of utterance (time)
const int L = label_lengths[mb]; // Number of labels in transcription
const int S = 2*L + 1; // Number of labels with blanks
CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_,
bytes_used + mb * per_minibatch_bytes, blank_label_,
flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0));
// No valid alignment exists when labels + forced blanks exceed T.
if (L + ctcm.repeats > T)
costs[mb] = ProbT(0);
else {
costs[mb] = -compute_alphas(probs + mb * alphabet_size_, ctcm.repeats, S, T,
ctcm.e_inc, ctcm.s_inc, ctcm.labels_w_blanks,
ctcm.alphas);
}
}
return CTC_STATUS_SUCCESS;
}
|
GB_unop__ainv_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_uint16_uint16)
// op(A') function: GB (_unop_tran__ainv_uint16_uint16)
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = aij ; \
Cx [pC] = -z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = -aij (two's-complement negation of uint16_t) to all anz entries.
// Cx and Ax may alias.  Ab is A->b when A is bitmap, NULL otherwise.
GrB_Info GB (_unop_apply__ainv_uint16_uint16)
(
uint16_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = -z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = -z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -A' : transpose A and negate each entry.  The actual work is done by
// the shared template GB_unop_transpose.c, which expands GB_CAST_OP.
GrB_Info GB (_unop_tran__ainv_uint16_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
gru_utils.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "lite/backends/arm/math/sgemm.h"
namespace paddle {
namespace lite {
namespace arm {
namespace math {
// Pointers to the buffers used by one GRU step; all memory is owned by the
// caller.  The gate buffer is laid out [update | reset | cell], each
// frame_size wide, per batch row (see gru_unit_reset_act / gru_unit_out_act).
template <typename T>
struct GRUMetaValue {
T* gate_weight;         // update/reset gate weights (used as frame_size x 2*frame_size in sgemm)
T* state_weight;        // candidate-state weights (frame_size x frame_size)
T* gate_value;          // gate activations, 3*frame_size per batch row
T* reset_output_value;  // r ⊙ h_prev, frame_size per batch row
T* output_value;        // output hidden state h_t
T* prev_out_value;      // h_{t-1}; may be null for the first step
};
template <typename Dtype>
inline void gru_add_with_bias(
const Dtype* din, const Dtype* bias, Dtype* dout, int batch, int size);
template <>
inline void gru_add_with_bias(
const float* din, const float* bias, float* dout, int batch, int size) {
#pragma omp parallel for
for (int i = 0; i < batch; ++i) {
int j = 0;
auto din_batch = din + i * size;
auto dout_batch = dout + i * size;
float32x4_t vb0 = vld1q_f32(bias);
float32x4_t vin0 = vld1q_f32(din_batch);
float32x4_t vout0;
float32x4_t vout1;
float32x4_t vin1;
float32x4_t vb1;
for (; j < size - 7; j += 8) {
vin1 = vld1q_f32(din_batch + j + 4);
vb1 = vld1q_f32(bias + j + 4);
vout0 = vaddq_f32(vb0, vin0);
vout1 = vaddq_f32(vb1, vin1);
vb0 = vld1q_f32(bias + j + 8);
vin0 = vld1q_f32(din_batch + j + 8);
vst1q_f32(dout_batch + j, vout0);
vst1q_f32(dout_batch + j + 4, vout1);
}
for (; j < size; ++j) {
dout_batch[j] = din_batch[j] + bias[j];
}
}
}
// Apply the activation Act (via vactive_f32/active_f32) to the update and
// reset gates in place, and compute reset_hidden_prev = act(reset) * h_prev
// for every batch row.  When hidden_prev is null, h_prev is treated as 0.
// Each *_stride is the element stride between consecutive batch rows.
template <lite_api::ActivationType Act>
static void gru_unit_reset_act_impl(float* updata_gate,
int stride_update,
float* reset_gate,
int stride_reset,
const float* hidden_prev,
int stride_hidden_prev,
float* reset_hidden_prev,
int stride_reset_hidden_prev,
int frame_size,
int batch_size) {
#pragma omp parallel for
for (int b = 0; b < batch_size; ++b) {
// Zero vectors stand in for h_prev when there is no previous output.
float32x4_t vpre0 = vdupq_n_f32(0.f);
float32x4_t vpre1 = vdupq_n_f32(0.f);
float prev = 0.f;
int i = 0;
// Vectorized body: 8 floats per iteration.
for (; i < frame_size - 7; i += 8) {
float32x4_t vu0 = vld1q_f32(updata_gate + i);
float32x4_t vu1 = vld1q_f32(updata_gate + i + 4);
float32x4_t vr0 = vld1q_f32(reset_gate + i);
float32x4_t vr1 = vld1q_f32(reset_gate + i + 4);
float32x4_t vau0 = lite::arm::math::vactive_f32<Act>(vu0);
float32x4_t vau1 = lite::arm::math::vactive_f32<Act>(vu1);
if (hidden_prev) {
vpre0 = vld1q_f32(hidden_prev + i);
vpre1 = vld1q_f32(hidden_prev + i + 4);
}
float32x4_t var0 = lite::arm::math::vactive_f32<Act>(vr0);
float32x4_t var1 = lite::arm::math::vactive_f32<Act>(vr1);
vst1q_f32(updata_gate + i, vau0);
vst1q_f32(updata_gate + i + 4, vau1);
float32x4_t vres0 = vmulq_f32(vpre0, var0);
float32x4_t vres1 = vmulq_f32(vpre1, var1);
vst1q_f32(reset_gate + i, var0);
vst1q_f32(reset_gate + i + 4, var1);
vst1q_f32(reset_hidden_prev + i, vres0);
vst1q_f32(reset_hidden_prev + i + 4, vres1);
}
// Scalar tail.
for (; i < frame_size; ++i) {
updata_gate[i] = lite::arm::math::active_f32<Act>(updata_gate[i]);
reset_gate[i] = lite::arm::math::active_f32<Act>(reset_gate[i]);
if (hidden_prev) {
prev = hidden_prev[i];
}
reset_hidden_prev[i] = reset_gate[i] * prev;
}
// Advance every pointer to the next batch row.
updata_gate += stride_update;
reset_gate += stride_reset;
if (hidden_prev) {
hidden_prev += stride_hidden_prev;
}
reset_hidden_prev += stride_reset_hidden_prev;
}
}
// Apply Act to the candidate state (in place) and blend it with h_prev using
// the update gate u, per batch row:
//   origin_mode:  h = (1 - u) * act(c) + u * h_prev
//   otherwise:    h = (1 - u) * h_prev + u * act(c)
// When hidden_prev is null, h_prev is treated as 0.
template <lite_api::ActivationType Act>
static void gru_unit_out_act_impl(bool origin_mode,
float* updata_gate,
int stride_update,
float* cell_state,
int stride_cell_state,
const float* hidden_prev,
int stride_hidden_prev,
float* hidden,
int stride_hidden,
int frame_size,
int batch_size) {
#pragma omp parallel for
for (int b = 0; b < batch_size; ++b) {
float32x4_t vpre0 = vdupq_n_f32(0.f);
float32x4_t vpre1 = vdupq_n_f32(0.f);
float prev = 0.f;
int i = 0;
if (origin_mode) {
// Vectorized: h = act(c) - u*act(c) + u*h_prev
for (; i < frame_size - 7; i += 8) {
float32x4_t vc0 = vld1q_f32(cell_state + i);
float32x4_t vc1 = vld1q_f32(cell_state + i + 4);
float32x4_t vu0 = vld1q_f32(updata_gate + i);
float32x4_t vu1 = vld1q_f32(updata_gate + i + 4);
float32x4_t vac0 = lite::arm::math::vactive_f32<Act>(vc0);
float32x4_t vac1 = lite::arm::math::vactive_f32<Act>(vc1);
if (hidden_prev) {
vpre0 = vld1q_f32(hidden_prev + i);
vpre1 = vld1q_f32(hidden_prev + i + 4);
}
float32x4_t vh0 = vmlsq_f32(vac0, vu0, vac0);
float32x4_t vh1 = vmlsq_f32(vac1, vu1, vac1);
vst1q_f32(cell_state + i, vac0);
vst1q_f32(cell_state + i + 4, vac1);
vh0 = vmlaq_f32(vh0, vu0, vpre0);
vh1 = vmlaq_f32(vh1, vu1, vpre1);
vst1q_f32(hidden + i, vh0);
vst1q_f32(hidden + i + 4, vh1);
}
for (; i < frame_size; ++i) {
if (hidden_prev) {
prev = hidden_prev[i];
}
cell_state[i] = lite::arm::math::active_f32<Act>(cell_state[i]);
hidden[i] =
cell_state[i] * (1.f - updata_gate[i]) + updata_gate[i] * prev;
}
} else {
// Vectorized: h = h_prev - u*h_prev + u*act(c)
for (; i < frame_size - 7; i += 8) {
float32x4_t vc0 = vld1q_f32(cell_state + i);
float32x4_t vc1 = vld1q_f32(cell_state + i + 4);
float32x4_t vu0 = vld1q_f32(updata_gate + i);
float32x4_t vu1 = vld1q_f32(updata_gate + i + 4);
float32x4_t vac0 = lite::arm::math::vactive_f32<Act>(vc0);
float32x4_t vac1 = lite::arm::math::vactive_f32<Act>(vc1);
if (hidden_prev) {
vpre0 = vld1q_f32(hidden_prev + i);
vpre1 = vld1q_f32(hidden_prev + i + 4);
}
float32x4_t vh0 = vmlsq_f32(vpre0, vpre0, vu0);
float32x4_t vh1 = vmlsq_f32(vpre1, vpre1, vu1);
vst1q_f32(cell_state + i, vac0);
vst1q_f32(cell_state + i + 4, vac1);
vh0 = vmlaq_f32(vh0, vu0, vac0);
vh1 = vmlaq_f32(vh1, vu1, vac1);
vst1q_f32(hidden + i, vh0);
vst1q_f32(hidden + i + 4, vh1);
}
for (; i < frame_size; ++i) {
cell_state[i] = lite::arm::math::active_f32<Act>(cell_state[i]);
if (hidden_prev) {
prev = hidden_prev[i];
}
hidden[i] =
prev * (1.f - updata_gate[i]) + updata_gate[i] * cell_state[i];
}
}
// Advance to the next batch row.
updata_gate += stride_update;
cell_state += stride_cell_state;
if (hidden_prev) {
hidden_prev += stride_hidden_prev;
}
hidden += stride_hidden;
}
}
// Dispatch the reset/update-gate activation to the compile-time specialized
// kernel.  The gate buffer is [update | reset | cell] per row, hence the
// 3*frame_size row strides and the frame_size offset for the reset gate.
// Unknown activation types are silently ignored (no-op).
inline void gru_unit_reset_act(lite_api::ActivationType act_type,
GRUMetaValue<float> value,
int frame_size,
int batch_size) {
auto updata_gate = value.gate_value;
auto reset_gate = value.gate_value + frame_size;
auto hidden_prev = value.prev_out_value;
auto reset_hidden_prev = value.reset_output_value;
int stride_update = 3 * frame_size;
int stride_reset = 3 * frame_size;
int stride_hidden_prev = frame_size;
int stride_reset_hidden_prev = frame_size;
// ("kIndentity" spelling comes from the lite_api enum.)
switch (act_type) {
case lite_api::ActivationType::kIndentity:
gru_unit_reset_act_impl<lite_api::ActivationType::kIndentity>(
updata_gate,
stride_update,
reset_gate,
stride_reset,
hidden_prev,
stride_hidden_prev,
reset_hidden_prev,
stride_reset_hidden_prev,
frame_size,
batch_size);
break;
case lite_api::ActivationType::kTanh:
gru_unit_reset_act_impl<lite_api::ActivationType::kTanh>(
updata_gate,
stride_update,
reset_gate,
stride_reset,
hidden_prev,
stride_hidden_prev,
reset_hidden_prev,
stride_reset_hidden_prev,
frame_size,
batch_size);
break;
case lite_api::ActivationType::kSigmoid:
gru_unit_reset_act_impl<lite_api::ActivationType::kSigmoid>(
updata_gate,
stride_update,
reset_gate,
stride_reset,
hidden_prev,
stride_hidden_prev,
reset_hidden_prev,
stride_reset_hidden_prev,
frame_size,
batch_size);
break;
case lite_api::ActivationType::kRelu:
gru_unit_reset_act_impl<lite_api::ActivationType::kRelu>(
updata_gate,
stride_update,
reset_gate,
stride_reset,
hidden_prev,
stride_hidden_prev,
reset_hidden_prev,
stride_reset_hidden_prev,
frame_size,
batch_size);
break;
default:
break;
}
}
// Dispatch the output activation to the compile-time specialized kernel.
// The candidate state is the third slot of the gate buffer
// (gate_value + 2*frame_size).  Unknown activation types are a no-op.
inline void gru_unit_out_act(lite_api::ActivationType act_type,
bool origin_mode,
GRUMetaValue<float> value,
int frame_size,
int batch_size) {
auto updata_gate = value.gate_value;
auto cell_state = value.gate_value + 2 * frame_size;
auto hidden_prev = value.prev_out_value;
auto hidden = value.output_value;
int stride_update = 3 * frame_size;
int stride_cell_state = 3 * frame_size;
int stride_hidden_prev = frame_size;
int stride_hidden = frame_size;
switch (act_type) {
case lite_api::ActivationType::kIndentity:
gru_unit_out_act_impl<lite_api::ActivationType::kIndentity>(
origin_mode,
updata_gate,
stride_update,
cell_state,
stride_cell_state,
hidden_prev,
stride_hidden_prev,
hidden,
stride_hidden,
frame_size,
batch_size);
break;
case lite_api::ActivationType::kTanh:
gru_unit_out_act_impl<lite_api::ActivationType::kTanh>(origin_mode,
updata_gate,
stride_update,
cell_state,
stride_cell_state,
hidden_prev,
stride_hidden_prev,
hidden,
stride_hidden,
frame_size,
batch_size);
break;
case lite_api::ActivationType::kSigmoid:
gru_unit_out_act_impl<lite_api::ActivationType::kSigmoid>(
origin_mode,
updata_gate,
stride_update,
cell_state,
stride_cell_state,
hidden_prev,
stride_hidden_prev,
hidden,
stride_hidden,
frame_size,
batch_size);
break;
case lite_api::ActivationType::kRelu:
gru_unit_out_act_impl<lite_api::ActivationType::kRelu>(origin_mode,
updata_gate,
stride_update,
cell_state,
stride_cell_state,
hidden_prev,
stride_hidden_prev,
hidden,
stride_hidden,
frame_size,
batch_size);
break;
default:
break;
}
}
// One full GRU step:
//   1) gate_value[:, 0:2f] += h_prev * gate_weight      (update/reset pre-acts)
//   2) activate gates, compute r ⊙ h_prev               (gru_unit_reset_act)
//   3) gate_value[:, 2f:3f] += (r ⊙ h_prev) * state_weight  (candidate pre-act)
//   4) activate candidate and blend into output_value   (gru_unit_out_act)
// The two sgemm calls are skipped on the first step (prev_out_value null).
template <typename T>
struct GRUUnitFunctor {
static void compute(GRUMetaValue<T> value,
int frame_size,
int batch_size,
const lite_api::ActivationType active_node,
const lite_api::ActivationType active_gate,
bool origin_mode,
ARMContext* ctx) {
if (value.prev_out_value) {
// gate_value += prev_out * gate_weight, accumulating into the
// first 2*frame_size columns (ldc = 3*frame_size).
sgemm(false,
false,
batch_size,
frame_size * 2,
frame_size,
1.f,
value.prev_out_value,
frame_size,
value.gate_weight,
frame_size * 2,
1.f,
value.gate_value,
frame_size * 3,
nullptr,
false,
false,
ctx);
}
gru_unit_reset_act(active_gate, value, frame_size, batch_size);
if (value.prev_out_value) {
// cell pre-activation += (r ⊙ h_prev) * state_weight, accumulating
// into the third frame_size-wide slot of gate_value.
sgemm(false,
false,
batch_size,
frame_size,
frame_size,
1.f,
value.reset_output_value,
frame_size,
value.state_weight,
frame_size,
1.f,
value.gate_value + frame_size * 2,
frame_size * 3,
nullptr,
false,
false,
ctx);
}
gru_unit_out_act(active_node, origin_mode, value, frame_size, batch_size);
}
};
} // namespace math
} // namespace arm
} // namespace lite
} // namespace paddle
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 *
 * Unlike the classic GNU carry-based implementation, this version does not
 * modify *y as a side effect, and it cannot overflow the intermediate `int`
 * carry for large second differences.  RESULT is normalized so that
 * tv_usec is always in [0, 1000000); a negative difference is represented
 * with a negative tv_sec (e.g. -100us -> {tv_sec = -1, tv_usec = 999900}),
 * exactly as the original produced.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Work in whole microseconds to avoid manual carry handling. */
  long long diff = (long long)(x->tv_sec - y->tv_sec) * 1000000LL
                 + (long long)(x->tv_usec - y->tv_usec);
  result->tv_sec = (long)(diff / 1000000LL);
  result->tv_usec = (long)(diff % 1000000LL);
  /* C division truncates toward zero; renormalize a negative remainder. */
  if (result->tv_usec < 0)
  {
    result->tv_usec += 1000000;
    result->tv_sec -= 1;
  }
  /* Return 1 if result is negative. */
  return diff < 0;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 32;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
add.h | #pragma once
#include <vector>
#include <unordered_map>
#include <algorithm>
#include <omp.h>
#include "_cuda.h"
using std::vector;
using std::unordered_map;
using std::max;
// Add v to each of the N elements of x, in place.
template <class T>
void add(T *x, int N, T v) {
  for (T *p = x, *e = x + N; p != e; ++p)
    *p += v;
}
// vector overload: add v to every element of x, in place.
template <class T>
void add(vector<T>& x, T v) {
add(x.data(), x.size(), v);
}
// map overload: add v to every mapped value of x, in place.
template <class K, class T>
void add(unordered_map<K, T>& x, T v) {
for (auto& p : x) p.second += v;
}
// Element-wise sum: a[i] = x[i] + y[i] for every i in [0, N).
template <class T>
void add(T *a, T *x, T *y, int N) {
  int i = 0;
  while (i < N) {
    a[i] = x[i] + y[i];
    ++i;
  }
}
// vector overload of the element-wise sum; a must already have the size.
template <class T>
void add(vector<T>& a, vector<T>& x, vector<T>& y) {
return add(a.data(), x.data(), y.data(), a.size());
}
template <class K, class T>
void add(unordered_map<K, T>& a, unordered_map<K, T>& x, unordered_map<K, T> y) {
for (auto& p : x)
a[p.first] = x[p.first] + y[p.first];
}
// Add v to x at each index produced by the iterable `is`, in place.
template <class T, class C>
void addAt(T *x, C&& is , T v) {
for (int i : is)
x[i] += v;
}
// vector overload of the indexed add.
template <class T, class C>
void addAt(vector<T>& x, C&& is, T v) {
addAt(x.data(), is, v);
}
// map overload: add v at each key in `ks`; operator[] inserts missing keys
// with a value-initialized T before adding.
template <class K, class T, class C>
void addAt(unordered_map<K, T>& x, C&& ks, T v) {
for (auto&& k : ks)
x[k] += v;
}
// OpenMP-parallel variant of add(x, N, v); iterations are independent.
template <class T>
void addOmp(T *x, int N, T v) {
#pragma omp parallel for
for (int i=0; i<N; i++)
x[i] += v;
}
// vector overload of the OpenMP-parallel add.
template <class T>
void addOmp(vector<T>& x, T v) {
addOmp(x.data(), x.size(), v);
}
// Device-side strided loop: the calling thread handles indices i, i+DI, ...
template <class T>
__device__ void addKernelLoop(T *a, int N, T v, int i, int DI) {
for (; i<N; i+=DI)
a[i] += v;
}
// CUDA kernel: grid-stride add of v over a[0..N) (start B*b+t, stride G*B).
// DEFINE(t, b, B, G) comes from _cuda.h.
template <class T>
__global__ void addKernel(T *a, int N, T v) {
DEFINE(t, b, B, G);
addKernelLoop(a, N, v, B*b+t, G*B);
}
// Host wrapper: copy a to the device, run addKernel, copy the result back.
// _THREADS/_BLOCKS/ceilDiv/TRY are provided by _cuda.h.
template <class T>
void addCuda(T *a, int N, T v) {
int threads = _THREADS;
int blocks = min(ceilDiv(N, threads), _BLOCKS);
size_t A1 = N * sizeof(T);
T *aD;
TRY( cudaMalloc(&aD, A1) );
TRY( cudaMemcpy(aD, a, A1, cudaMemcpyHostToDevice) );
addKernel<<<blocks, threads>>>(aD, N, v);
TRY( cudaMemcpy(a, aD, A1, cudaMemcpyDeviceToHost) );
TRY( cudaFree(aD) );
}
// vector overload of the CUDA in-place add.
template <class T>
void addCuda(vector<T>& x, T v) {
addCuda(x.data(), x.size(), v);
}
// Device-side strided loop for the element-wise sum a[i] = x[i] + y[i].
template <class T>
__device__ void addKernelLoop(T *a, T *x, T *y, int N, int i, int DI) {
for (; i<N; i+=DI)
a[i] = x[i] + y[i];
}
// CUDA kernel: grid-stride element-wise sum a = x + y.
template <class T>
__global__ void addKernel(T *a, T *x, T *y, int N) {
DEFINE(t, b, B, G);
addKernelLoop(a, x, y, N, B*b+t, G*B);
}
// Host wrapper for a = x + y on the device.  Only two device buffers are
// allocated: the kernel is launched with the output aliased to xD, which is
// safe because each index reads x[i]/y[i] before writing a[i], and the
// summed xD is then copied back into a.
template <class T>
void addCuda(T *a, T *x, T *y, int N) {
int threads = _THREADS;
int blocks = min(ceilDiv(N, threads), _BLOCKS);
size_t A1 = N * sizeof(T);
T *xD, *yD;
TRY( cudaMalloc(&xD, A1) );
TRY( cudaMalloc(&yD, A1) );
TRY( cudaMemcpy(xD, x, A1, cudaMemcpyHostToDevice) );
TRY( cudaMemcpy(yD, y, A1, cudaMemcpyHostToDevice) );
addKernel<<<blocks, threads>>>(xD, xD, yD, N);
TRY( cudaMemcpy(a, xD, A1, cudaMemcpyDeviceToHost) );
TRY( cudaFree(xD) );
TRY( cudaFree(yD) );
}
// vector overload of the CUDA element-wise sum; a must already have the size.
template <class T>
void addCuda(vector<T>& a, vector<T>& x, vector<T>& y) {
addCuda(a.data(), x.data(), y.data(), a.size());
}
|
GB_unaryop__ainv_uint32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint32_uint64
// op(A') function: GB_tran__ainv_uint32_uint64
// C type: uint32_t
// A type: uint64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -((uint32_t) Ax [p]) for all p, in parallel.
// This expands the GB_CAST_OP / GB_GETA / GB_CASTING / GB_OP macro chain
// inline: load the entry as uint64_t, truncate to uint32_t, then negate
// with unsigned (mod 2^32) wraparound.
GrB_Info GB_unop__ainv_uint32_uint64
(
uint32_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator disabled at configure time: caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < anz ; k++)
{
uint64_t aij = Ax [k] ;        // aij = Ax [k]
uint32_t z = (uint32_t) aij ;  // cast (truncate) to the output type
Cx [k] = -z ;                  // unsigned negation
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and negate (auto-generated).
// The actual loop body lives in the GB_unaryop_transpose.c template, which
// expands the GB_* macros defined earlier in this file.
GrB_Info GB_tran__ainv_uint32_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator disabled at configure time: caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dkfun.c |
/*** DKFUN.C ******/
// DKFmagicE: count candidates in the pm-digit/bit range.  Exact semantics
// depend on the DKpot/PW2/RTU/RUD/RN tables built by DKgen -- TODO confirm.
// pm: range selector, must satisfy 0 < pm < 129 (asserted below).
// Returns the candidate count as a BNUM.
BNUM DKFmagicE(int pm)
{
LNUM can,cin,cfi;  // candidate, and first/last candidate of the range
LNUM cun,cpas;     // cun: per-candidate working copy; cpas: step size
BNUM expa;         // running count (the returned value)
int m,rm;
m=pm;rm=m-1;
assert(m>0);assert(m<129);
if(RTU[rm])
{cfi=DKpot(rm+1)-1;cin=cfi;cpas=2;  /* degenerate range: start == end */
}
else
{
cin=DKpot(rm)+1;cfi=DKpot(rm+1)-1;
cpas=PW2[RUD[rm]];  /* step is a power of two from the PW2 table */
while((cin%cpas) NE (cpas-1))cin+=2;  /* align start to residue cpas-1 */
}
expa=0;if(m EQ (0+1))expa++;  /* special-case m == 1 */
for(can=cin;can<=cfi;can+=cpas)
{
cun=can;
/* NOTE(review): `cun` is assigned but never used, and the loop below
   mutates `can` -- the OUTER loop variable -- which also perturbs the
   outer iteration stepping.  This strongly suggests the inner loop was
   meant to operate on `cun`; confirm against the intended algorithm. */
for(int r=rm;r>1;r--)
{if( (can&PW2[r]) NE 0)can=(can|RN[r]);
}
expa++;
}
return expa;
}
// DKFdemoMP: multi-threaded demo driver -- computes E(m) for m = 1..n,
// with OpenMP distributing one table entry at a time, then sums into F.
void DKFdemoMP(void)
{
#define dkfdemo01 150
BNUM E[dkfdemo01];BNUM F;
int n,ncpu;
printf("DKFdemoMP DKfun.c \n");
/* NOTE(review): `omp master` outside a parallel region binds to the
   single initial thread, so it is effectively unconditional here. */
#pragma omp master
omp_set_num_threads(1);
ncpu=omp_get_num_procs();
n= 56;assert(n< dkfdemo01);
DKgen(n);  /* build the lookup tables used by DKFmagicE */
for(int m=0;m< dkfdemo01;m++)E[m]=0;
omp_set_num_threads(ncpu);
#pragma omp parallel
{
/* All threads walk the same m loop; `omp single` hands each iteration's
   work to exactly one thread, and the implicit barrier at the end of
   the single keeps the iterations in lockstep. */
for(int m= 1;m<= n;m++)
{
int ntask;
#pragma omp single
if(E[m] EQ 0)
{
E[m]= DKFmagicE(m);
ntask=omp_get_thread_num();
/* NOTE(review): %I64d is MSVC-specific; assumes BNUM is 64-bit. */
printf("E(%d)=%I64d (task %d) \n"
,m,E[m],ntask);
}
}
}
/* Summation on the initial thread (again outside any parallel region,
   so the master pragma is effectively unconditional). */
#pragma omp master
{
F=0;
for(int m=1;m<=n;m++)F+=E[m];
printf("F(%d)= %I64d \n",n,F);
}
}
// DKFdemo: single-threaded demo -- print E(n) for n = 5, 9, 17, 33
// (n doubles-minus-one each step, stopping once n exceeds 64).
void DKFdemo(void)
{
for(int n=5;n<=64;n=2*n-1)
{
DKgen(n);
BNUM magic = DKFmagicE(n);
printf("E(%d)= %I64d \n",n,magic);
}
}
|
vednnActivationBackward.c | #include "vednnActivationBackward.h"
#include "vednn-def.h"
#include <stdio.h>
#include <stdint.h>
// Run an activation-backward kernel either directly or split across
// OpenMP threads.  Each thread gets a contiguous slice of the element
// range; the first (nElements % nthreads) threads get one extra element.
// Per-thread results are OR-reduced into a single vednnError_t.
// NOTE(review): the parameter list (pDataGradOut, pDataIn, pDataGradIn,
// nElements) comes from the VEDNN_ACTIVATIONBKW_ARGS macro -- confirm in
// vednnActivationBackward.h.  reduction(|:rc) on an enum type assumes the
// error codes are OR-combinable bit flags; verify.
static inline vednnError_t
vednnActivationBackward_wrapper( vednnActivationBackward_t pFunc,
VEDNN_ACTIVATIONBKW_ARGS )
{
#ifndef VEDNN_USE_OPENMP
return pFunc(VEDNN_ACTIVATIONBKW_ARGS_LIST);
#else
if ( __vednn_omp_num_threads == 1 ) {
/* single-thread configuration: call the kernel directly */
return pFunc(VEDNN_ACTIVATIONBKW_ARGS_LIST);
}
else {
vednnError_t rc = VEDNN_SUCCESS ;
#pragma omp parallel reduction(|:rc)
{
int64_t nthreads = omp_get_num_threads() ;
int64_t threadid = omp_get_thread_num() ;
/* even split, with the remainder spread one element at a time
   over the lowest-numbered threads */
int64_t eachNElement = nElements / nthreads ;
int64_t remain = nElements % nthreads ;
int64_t elementBegin = eachNElement * threadid + ( threadid < remain ? threadid : remain ) ;
int64_t myElement = eachNElement + ( threadid < remain ? 1 : 0 ) ;
if( myElement == 0 ) {
/* empty slice: contribute success to the reduction */
rc |= VEDNN_SUCCESS ;
}
else {
/* offset each buffer to this thread's slice (float elements) */
float* _pDataGradOut = ((float *)pDataGradOut) + elementBegin ;
float* _pDataIn = ((float *)pDataIn) + elementBegin ;
float* _pDataGradIn = ((float *)pDataGradIn) + elementBegin ;
rc |= pFunc((void*)_pDataGradOut, (void*)_pDataIn, (void*) _pDataGradIn, myElement) ;
}
}
return rc ;
}
#endif // openmp
}
/* ------------------------- public API ---------------------------------- */
// Public entry point: dispatch on the activation mode to the matching
// backward kernel, wrapped for OpenMP via OMPWRAP/WRAP_RET (which is
// expected to return from this function on a handled mode).  Unhandled
// modes fall through to the error path below.
vednnError_t vednnActivationBackward(
const vednnActivationMode_t mode,
VEDNN_ACTIVATIONBKW_ARGS)
{
#define OMPWRAP( IMPL ) WRAP_RET(IMPL, \
vednnActivationBackward_wrapper, VEDNN_ACTIVATIONBKW_ARGS_LIST)
switch(mode) {
case VEDNN_ACTIVATION_RELU :
OMPWRAP( vednnActivationBackward_Relu );
}
fprintf(stderr, "VEDNN Error : vednnActivationBackward : Invalid Parameter !!\n") ;
return VEDNN_ERROR_INVALID_PARAM ;
#undef OMPWRAP
}
// vim: et sw=2 ts=2
|
GB_unop__acos_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__acos_fc64_fc64)
// op(A') function: GB (_unop_tran__acos_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = cacos (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cacos (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = cacos (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOS || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cacos (Ax [p]) for all p, in parallel.  The FC64 -> FC64 cast
// is the identity, so the load and the operator are folded into one
// expression.  In the bitmap case only entries with Ab [p] != 0 are
// computed (A->b has already been copied into C->b by the caller).
GrB_Info GB (_unop_apply__acos_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab == NULL)
    {
        // dense/full/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = cacos (Ax [k]) ;
        }
    }
    else
    {
        // bitmap case, no transpose
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = cacos (Ax [k]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply cacos (auto-generated).
// The actual loop body lives in the GB_unop_transpose.c template, which
// expands the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__acos_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at configure time: caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.